0. Setup: load packages and import data

library(multcomp)
## Loading required package: mvtnorm
## Loading required package: survival
## Loading required package: TH.data
## Loading required package: MASS
## 
## Attaching package: 'TH.data'
## The following object is masked from 'package:MASS':
## 
##     geyser
library(lsr)
library(pwr)
library(psych)
library(tidyverse)
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.3     ✔ readr     2.1.4
## ✔ forcats   1.0.0     ✔ stringr   1.5.0
## ✔ ggplot2   3.4.3     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.0
## ✔ purrr     1.0.2
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ ggplot2::%+%()   masks psych::%+%()
## ✖ ggplot2::alpha() masks psych::alpha()
## ✖ dplyr::filter()  masks stats::filter()
## ✖ dplyr::lag()     masks stats::lag()
## ✖ dplyr::select()  masks MASS::select()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(rio)
library(qualtRics)
library(data.table)
## 
## Attaching package: 'data.table'
## 
## The following objects are masked from 'package:lubridate':
## 
##     hour, isoweek, mday, minute, month, quarter, second, wday, week,
##     yday, year
## 
## The following objects are masked from 'package:dplyr':
## 
##     between, first, last
## 
## The following object is masked from 'package:purrr':
## 
##     transpose
library(dplyr)
library(readr)
library(tibble)
library(rlang)
## 
## Attaching package: 'rlang'
## 
## The following object is masked from 'package:data.table':
## 
##     :=
## 
## The following objects are masked from 'package:purrr':
## 
##     %@%, flatten, flatten_chr, flatten_dbl, flatten_int, flatten_lgl,
##     flatten_raw, invoke, splice
library(cjoint)
## Loading required package: sandwich
## Loading required package: lmtest
## Loading required package: zoo
## 
## Attaching package: 'zoo'
## 
## The following objects are masked from 'package:base':
## 
##     as.Date, as.Date.numeric
## 
## Loading required package: survey
## Loading required package: grid
## Loading required package: Matrix
## 
## Attaching package: 'Matrix'
## 
## The following objects are masked from 'package:tidyr':
## 
##     expand, pack, unpack
## 
## 
## Attaching package: 'survey'
## 
## The following object is masked from 'package:graphics':
## 
##     dotchart
## 
## cjoint: AMCE Estimator for Conjoint Experiments
## Version: 2.1.1
## Authors: Soubhik Barari, Elissa Berwick, Jens Hainmueller, Daniel Hopkins, Sean Liu, Anton Strezhnev, Teppei Yamamoto
## 
## 
## Attaching package: 'cjoint'
## 
## The following object is masked from 'package:tibble':
## 
##     view
library(ggpubr) # version 0.2, loading required package 'magrittr'
library(agricolae)
library(rstatix)
## 
## Attaching package: 'rstatix'
## 
## The following object is masked from 'package:MASS':
## 
##     select
## 
## The following object is masked from 'package:stats':
## 
##     filter
source("http://rfs.kvasaheim.com/stat200.R") # importing functions
## 
## 
## Loading additional functionality to base R.
## 
##      This will make your STAT200 experience much easier 
##  and more fulfilling.
## 
## 
## Functions loaded:
##   ✓ bayes.law
##   ✓ binom.plot
##   ✓ binom.pred
##   ✓ conf.bounds
##   ✓ cv
##   ✓ forsberg.test
##   ✓ getMeans
##   ✓ groupTransform
##   ✓ hildebrand.rule
##   ✓ histogram
##   ✓ interval
##   ✓ isBetween
##   ✓ kruskalMCT
##   ✓ kurtosis
##   ✓ laakso
##   ✓ links
##   ✓ make.link
##   ✓ means
##   ✓ median.test
##   ✓ model.fit
##   ✓ modal
##   ✓ normoverlay
##   ✓ ogive
##   ✓ onevar.test
##   ✓ overlay
##   ✓ paretochart
##   ✓ predictionEllipse
##   ✓ quartile
##   ✓ runs.test
##   ✓ set.base
##   ✓ shapiroTest
##   ✓ skew
##   ✓ summaryHCE
##   ✓ summaryVIFA
##   ✓ tapd
##   ✓ theme
##   ✓ tukey.var.test
##   ✓ wald.test
##   ✓ z.test
##   ✓ zscore
## 
## 
## 
## 
## 
## Happy analyzing!!!
library(readr)
library(Hmisc)
## 
## Attaching package: 'Hmisc'
## 
## The following object is masked from 'package:survey':
## 
##     deff
## 
## The following objects are masked from 'package:dplyr':
## 
##     src, summarize
## 
## The following object is masked from 'package:psych':
## 
##     describe
## 
## The following objects are masked from 'package:base':
## 
##     format.pval, units
library(corrplot)
## corrplot 0.92 loaded
library(readxl)
library(lm.beta)
library(sjPlot)
library(sjmisc)
## Learn more about sjmisc with 'browseVignettes("sjmisc")'.
## 
## Attaching package: 'sjmisc'
## 
## The following object is masked from 'package:Hmisc':
## 
##     %nin%
## 
## The following object is masked from 'package:rlang':
## 
##     is_empty
## 
## The following object is masked from 'package:purrr':
## 
##     is_empty
## 
## The following object is masked from 'package:tidyr':
## 
##     replace_na
## 
## The following object is masked from 'package:tibble':
## 
##     add_case
library(ggplot2)
library(lsr)
library("car") # load car and carData packages
## Loading required package: carData
## 
## Attaching package: 'car'
## 
## The following object is masked _by_ '.GlobalEnv':
## 
##     logit
## 
## The following object is masked from 'package:dplyr':
## 
##     recode
## 
## The following object is masked from 'package:purrr':
## 
##     some
## 
## The following object is masked from 'package:psych':
## 
##     logit
library("nnet")
library("effects")
## lattice theme set by effectsTheme()
## See ?effectsTheme for details.
library(effectsize)
## 
## Attaching package: 'effectsize'
## 
## The following objects are masked from 'package:rstatix':
## 
##     cohens_d, eta_squared
## 
## The following object is masked from 'package:psych':
## 
##     phi
## 
## The following object is masked from 'package:mvtnorm':
## 
##     standardize
library(arm)
## Loading required package: lme4
## 
## Attaching package: 'lme4'
## 
## The following object is masked from 'package:rio':
## 
##     factorize
## 
## 
## arm (Version 1.13-1, built: 2022-8-25)
## 
## Working directory is /Users/jonathandoriscar/Documents/White Fragility/Buying Black and Moral Affirmation
## 
## 
## Attaching package: 'arm'
## 
## The following object is masked _by_ '.GlobalEnv':
## 
##     logit
## 
## The following objects are masked from 'package:effectsize':
## 
##     display, standardize
## 
## The following object is masked from 'package:car':
## 
##     logit
## 
## The following object is masked from 'package:corrplot':
## 
##     corrplot
## 
## The following objects are masked from 'package:psych':
## 
##     logit, rescale, sim
## 
## The following object is masked from 'package:mvtnorm':
## 
##     standardize
library(lmtest)
# Install the package jtools if not already installed
library(jtools)
## 
## Attaching package: 'jtools'
## 
## The following object is masked from 'package:arm':
## 
##     standardize
## 
## The following object is masked from 'package:effectsize':
## 
##     standardize
## 
## The following objects are masked from 'package:sjmisc':
## 
##     %nin%, center
## 
## The following object is masked from 'package:Hmisc':
## 
##     %nin%
## 
## The following object is masked from 'package:mvtnorm':
## 
##     standardize
library(broom.mixed)
# you may be asked to install 'broom' and 'ggstance' packages as well
library("ggstance")
## 
## Attaching package: 'ggstance'
## 
## The following objects are masked from 'package:ggplot2':
## 
##     geom_errorbarh, GeomErrorbarh
library(irr)
## Loading required package: lpSolve
## 
## Attaching package: 'irr'
## 
## The following object is masked from 'package:agricolae':
## 
##     kendall
library("ggsignif")
library("mgcv")
## Loading required package: nlme
## 
## Attaching package: 'nlme'
## 
## The following object is masked from 'package:lme4':
## 
##     lmList
## 
## The following object is masked from 'package:dplyr':
## 
##     collapse
## 
## This is mgcv 1.8-42. For overview type 'help("mgcv-package")'.
## 
## Attaching package: 'mgcv'
## 
## The following object is masked from 'package:nnet':
## 
##     multinom
library("effects")
library(forcats)
options(scipen = 100)
# Import Study 1 Qualtrics export (327 rows x 116 columns).
# NOTE(review): absolute home-directory path will break on any other machine —
# consider a relative path or the here package.
wf_dt <- read_csv("~/Documents/White Fragility/Buying Black and Moral Affirmation/wf_maS1.csv")
## Rows: 327 Columns: 116
## ── Column specification ────────────────────────────────────────────────────────
## Delimiter: ","
## chr (39): StartDate, EndDate, RecordedDate, ResponseId, DistributionChannel,...
## dbl (76): ParticipantID, Status, Progress, Duration (in seconds), Finished, ...
## num  (1): race
## 
## ℹ Use `spec()` to retrieve the full column specification for this data.
## ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
# Interactive spreadsheet view (no effect when run non-interactively).
View(wf_dt)
# NOTE(review): attach() is discouraged — it puts all 116 columns on the search
# path, where later package loads can mask them. Bare names used below
# (credibility, BA_1, BMIS_*) resolve through this attach, so removing it
# would break those calls; prefer wf_dt$col or with() in a revision.
attach(wf_dt)
# Print all column names; positional indexing later (14:17, 19:34, 69:84,
# 85:88) relies on this exact ordering.
colnames(wf_dt)
##   [1] "ParticipantID"         "StartDate"             "EndDate"              
##   [4] "Status"                "Progress"              "Duration (in seconds)"
##   [7] "Finished"              "RecordedDate"          "ResponseId"           
##  [10] "DistributionChannel"   "UserLanguage"          "consent"              
##  [13] "prolificID"            "BA_1"                  "BA_2"                 
##  [16] "BA_3"                  "BA_4"                  "SE1"                  
##  [19] "BMIS_sad_1"            "BMIS_shame_1"          "BMIS_guilt_1"         
##  [22] "BMIS_tired_1"          "BMIS_nervous_1"        "BMIS_calm_1"          
##  [25] "BMIS_fedup_1"          "BMIS_loving_1"         "BMIS_angry_1"         
##  [28] "BMIS_lively_1"         "BMIS_caring_1"         "BMIS_content_1"       
##  [31] "BMIS_gloomy_1"         "BMIS_jittery_1"        "BMIS_drowsy_1"        
##  [34] "BMIS_happy_1"          "Q1 RP1"                "Q2 RP2"               
##  [37] "Q3 RP3"                "Q4 RP4"                "Q5 RP5"               
##  [40] "Q6 RP6"                "Q7 RP7"                "Q8 RN1"               
##  [43] "Q9 RN2"                "Q10 RN3"               "Q11 RN4"              
##  [46] "Q12 RN5"               "Q13 RN6"               "Q14 RN7"              
##  [49] "Q15 LP1"               "Q16 LP2"               "Q17 LP3"              
##  [52] "Q18 LP4"               "Q19 LP5"               "Q20 LP6"              
##  [55] "Q21 LP7"               "Q22 LN1"               "Q23 LN2"              
##  [58] "Q24 LN3"               "Q25 LN4"               "Q26 LN5"              
##  [61] "Q27 LN6"               "Q28 LN7"               "spwtime_First Click"  
##  [64] "spwtime_Last Click"    "spwtime_Page Submit"   "spwtime_Click Count"  
##  [67] "attentioncheck_nb"     "discrepancy_nb"        "BMIS_sad_2"           
##  [70] "BMIS_shame_2"          "BMIS_guilt_2"          "BMIS_tired_2"         
##  [73] "BMIS_nervous_2"        "BMIS_calm_2"           "BMIS_fedup_2"         
##  [76] "BMIS_loving_2"         "BMIS_angry_2"          "BMIS_lively_2"        
##  [79] "BMIS_caring_2"         "BMIS_content_2"        "BMIS_gloomy_2"        
##  [82] "BMIS_jittery_2"        "BMIS_drowsy_2"         "BMIS_happy_2"         
##  [85] "credibility"           "objective"             "valid"                
##  [88] "useful"                "rl_product_choice"     "rl_shop_intentions"   
##  [91] "rl_purchase"           "rl_wom"                "rr_product_choice"    
##  [94] "rr_shop_intentions"    "rr_purchase"           "rr_wom"               
##  [97] "attentioncheck_b"      "discrepancy_b"         "SE_2"                 
## [100] "age"                   "race"                  "education"            
## [103] "polit_affil"           "polit_affil_4_TEXT"    "polit_affil_cont_1"   
## [106] "gender"                "gender_4_TEXT"         "iat_prev"             
## [109] "iat_racial"            "iat_racial_time"       "iat_racial_quant"     
## [112] "recent_results"        "nobias_white"          "nobias_black"         
## [115] "bias_white"            "bias_black"
# Descriptives for the 16 time-1 BMIS mood items (columns 19:34 per colnames()).
summary(wf_dt[,19:34])
##    BMIS_sad_1     BMIS_shame_1    BMIS_guilt_1    BMIS_tired_1  BMIS_nervous_1 
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.00   Min.   :1.000  
##  1st Qu.:1.000   1st Qu.:1.000   1st Qu.:1.000   1st Qu.:2.00   1st Qu.:1.000  
##  Median :2.000   Median :1.000   Median :1.000   Median :3.00   Median :2.000  
##  Mean   :1.923   Mean   :1.611   Mean   :1.675   Mean   :2.71   Mean   :1.871  
##  3rd Qu.:3.000   3rd Qu.:2.000   3rd Qu.:2.000   3rd Qu.:3.00   3rd Qu.:3.000  
##  Max.   :4.000   Max.   :4.000   Max.   :4.000   Max.   :4.00   Max.   :4.000  
##  NA's   :16      NA's   :16      NA's   :16      NA's   :17     NA's   :16     
##   BMIS_calm_1     BMIS_fedup_1  BMIS_loving_1    BMIS_angry_1   BMIS_lively_1  
##  Min.   :1.000   Min.   :1.00   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:3.000   1st Qu.:1.00   1st Qu.:2.000   1st Qu.:1.000   1st Qu.:2.000  
##  Median :3.000   Median :1.00   Median :3.000   Median :1.000   Median :2.000  
##  Mean   :3.132   Mean   :1.81   Mean   :2.728   Mean   :1.437   Mean   :2.259  
##  3rd Qu.:4.000   3rd Qu.:3.00   3rd Qu.:3.000   3rd Qu.:2.000   3rd Qu.:3.000  
##  Max.   :4.000   Max.   :4.00   Max.   :4.000   Max.   :4.000   Max.   :4.000  
##  NA's   :16      NA's   :16     NA's   :18      NA's   :16      NA's   :18     
##  BMIS_caring_1   BMIS_content_1  BMIS_gloomy_1   BMIS_jittery_1 
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:2.000   1st Qu.:2.000   1st Qu.:1.000   1st Qu.:1.000  
##  Median :3.000   Median :3.000   Median :2.000   Median :1.000  
##  Mean   :2.929   Mean   :2.842   Mean   :1.897   Mean   :1.605  
##  3rd Qu.:4.000   3rd Qu.:4.000   3rd Qu.:3.000   3rd Qu.:2.000  
##  Max.   :4.000   Max.   :4.000   Max.   :4.000   Max.   :4.000  
##  NA's   :16      NA's   :16      NA's   :16      NA's   :16     
##  BMIS_drowsy_1    BMIS_happy_1  
##  Min.   :1.000   Min.   :1.000  
##  1st Qu.:1.000   1st Qu.:2.000  
##  Median :2.000   Median :3.000  
##  Mean   :2.129   Mean   :2.768  
##  3rd Qu.:3.000   3rd Qu.:3.000  
##  Max.   :4.000   Max.   :4.000  
##  NA's   :17      NA's   :16
# Descriptives for the 16 time-2 BMIS mood items (columns 69:84 per colnames()).
summary(wf_dt[,69:84])
##    BMIS_sad_2     BMIS_shame_2    BMIS_guilt_2    BMIS_tired_2  
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:1.000   1st Qu.:1.000   1st Qu.:1.000   1st Qu.:2.000  
##  Median :2.000   Median :1.000   Median :1.000   Median :3.000  
##  Mean   :1.986   Mean   :1.776   Mean   :1.763   Mean   :2.642  
##  3rd Qu.:3.000   3rd Qu.:3.000   3rd Qu.:3.000   3rd Qu.:3.000  
##  Max.   :4.000   Max.   :4.000   Max.   :4.000   Max.   :4.000  
##  NA's   :32      NA's   :33      NA's   :32      NA's   :34     
##  BMIS_nervous_2   BMIS_calm_2     BMIS_fedup_2   BMIS_loving_2  
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:1.000   1st Qu.:3.000   1st Qu.:1.000   1st Qu.:2.000  
##  Median :2.000   Median :3.000   Median :1.000   Median :3.000  
##  Mean   :1.844   Mean   :2.963   Mean   :1.687   Mean   :2.693  
##  3rd Qu.:3.000   3rd Qu.:4.000   3rd Qu.:2.000   3rd Qu.:3.000  
##  Max.   :4.000   Max.   :4.000   Max.   :4.000   Max.   :4.000  
##  NA's   :33      NA's   :32      NA's   :33      NA's   :34     
##   BMIS_angry_2   BMIS_lively_2   BMIS_caring_2   BMIS_content_2 
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:1.000   1st Qu.:2.000   1st Qu.:2.000   1st Qu.:2.000  
##  Median :1.000   Median :2.000   Median :3.000   Median :3.000  
##  Mean   :1.485   Mean   :2.278   Mean   :2.827   Mean   :2.803  
##  3rd Qu.:2.000   3rd Qu.:3.000   3rd Qu.:4.000   3rd Qu.:3.500  
##  Max.   :4.000   Max.   :4.000   Max.   :4.000   Max.   :4.000  
##  NA's   :32      NA's   :32      NA's   :33      NA's   :32     
##  BMIS_gloomy_2   BMIS_jittery_2  BMIS_drowsy_2    BMIS_happy_2  
##  Min.   :1.000   Min.   :1.000   Min.   :1.000   Min.   :1.000  
##  1st Qu.:1.000   1st Qu.:1.000   1st Qu.:1.000   1st Qu.:2.000  
##  Median :2.000   Median :1.000   Median :2.000   Median :3.000  
##  Mean   :1.898   Mean   :1.646   Mean   :2.156   Mean   :2.617  
##  3rd Qu.:3.000   3rd Qu.:2.000   3rd Qu.:3.000   3rd Qu.:3.000  
##  Max.   :4.000   Max.   :4.000   Max.   :4.000   Max.   :4.000  
##  NA's   :33      NA's   :33      NA's   :32      NA's   :32
# Descriptives for the four Bias Awareness items BA_1-BA_4 (columns 14:17).
summary(wf_dt[,14:17])
##       BA_1            BA_2            BA_3           BA_4      
##  Min.   :1.000   Min.   :1.000   Min.   :1.00   Min.   :1.000  
##  1st Qu.:2.000   1st Qu.:2.000   1st Qu.:2.00   1st Qu.:2.000  
##  Median :3.000   Median :3.000   Median :4.00   Median :4.000  
##  Mean   :3.334   Mean   :3.273   Mean   :3.73   Mean   :3.987  
##  3rd Qu.:5.000   3rd Qu.:5.000   3rd Qu.:5.00   3rd Qu.:5.500  
##  Max.   :7.000   Max.   :7.000   Max.   :7.00   Max.   :7.000  
##  NA's   :16      NA's   :16      NA's   :16     NA's   :16
# Reverse-score the 7-point change items (1..7 -> 3..-3, i.e. 4 - x) so that
# larger values indicate greater perceived change. car::recode() returns a
# numeric vector here because every target is a numeral (as.numeric = TRUE
# is the default).
wf_dt$discrepancy_nb <- car::recode(wf_dt$discrepancy_nb, "1 = '3'; 2 = '2'; 3 = '1'; 4 = '0'; 5 = '-1'; 6 = '-2'; 7 = '-3'")
wf_dt$attentioncheck_nb <- car::recode(wf_dt$attentioncheck_nb, "1 = '3'; 2 = '2'; 3 = '1'; 4 = '0'; 5 = '-1'; 6 = '-2'; 7 = '-3'")
wf_dt$discrepancy_b <- car::recode(wf_dt$discrepancy_b, "1 = '3'; 2 = '2'; 3 = '1'; 4 = '0'; 5 = '-1'; 6 = '-2'; 7 = '-3'")
wf_dt$attentioncheck_b <- car::recode(wf_dt$attentioncheck_b, "1 = '3'; 2 = '2'; 3 = '1'; 4 = '0'; 5 = '-1'; 6 = '-2'; 7 = '-3'") # larger scores indicate greater changes

# Discrepancy = change on the focal item relative to its attention-check item.
wf_dt$discrepancy_bias <- (wf_dt$discrepancy_b - wf_dt$attentioncheck_b)
# BUG FIX(review): the original line added discrepancy_nb to itself
# (i.e. 2 * discrepancy_nb). To parallel the bias-condition score above, the
# no-bias score should subtract its attention-check counterpart.
wf_dt$discrepancy_nobias <- (wf_dt$discrepancy_nb - wf_dt$attentioncheck_nb)
# Each participant saw only one condition, so exactly one of the two scores is
# non-missing per row; coalesce() collapses them into a single column.
wf_dt$bias_discrepancy <- coalesce(wf_dt$discrepancy_bias, wf_dt$discrepancy_nobias)
wf_dt$attentioncheck_nb <- as.numeric(wf_dt$attentioncheck_nb)
# Sanity check: the recode above produced a numeric column.
is.numeric(wf_dt$attentioncheck_nb)
## [1] TRUE
# NOTE(review): the column is numeric, so each string on the right-hand side
# (e.g. "1") is coerced to a number before comparison; arr.ind = TRUE has no
# effect on a plain vector. Row indices printed below feed the manual
# exclusion list further down.
which(wf_dt$attentioncheck_nb == "1", arr.ind = TRUE) # seeing which people failed my attention checks
## [1]  36 116 179 212 277 283 310 318 319
which(wf_dt$attentioncheck_nb == "2", arr.ind = TRUE)
## [1]  77 115 208 240 247 249
which(wf_dt$attentioncheck_nb == "3", arr.ind = TRUE)
## [1]  21 167 192 325
# NOTE(review): after the 4 - x recode the scale only spans -3..3, so 5/6/7
# can never match — presumably these checks predate the recode; verify intent.
which(wf_dt$attentioncheck_nb == "5", arr.ind = TRUE)
## integer(0)
which(wf_dt$attentioncheck_nb == "6", arr.ind = TRUE)
## integer(0)
which(wf_dt$attentioncheck_nb == "7", arr.ind = TRUE)
## integer(0)
wf_dt$attentioncheck_b <- as.numeric(wf_dt$attentioncheck_b)
is.numeric(wf_dt$attentioncheck_b)
## [1] TRUE
which(wf_dt$attentioncheck_b == "4", arr.ind = TRUE) # seeing which people failed my attention checks
## integer(0)
which(wf_dt$attentioncheck_b == "2", arr.ind = TRUE)
## [1]   2  26  69 134 200 201 251 261 272
which(wf_dt$attentioncheck_b == "3", arr.ind = TRUE)
##   [1]   5   9  11  17  20  24  29  30  34  35  37  38  40  43  44  45  48  49
##  [19]  50  52  56  58  62  63  64  73  75  80  83  86  87  91  92  93  94  97
##  [37]  98 102 104 105 107 108 112 122 124 125 128 129 131 135 136 137 139 140
##  [55] 143 144 146 147 149 150 151 154 157 160 161 163 164 176 180 182 183 184
##  [73] 185 186 187 188 189 190 193 195 196 198 202 205 226 231 232 233 236 238
##  [91] 239 245 248 252 253 255 257 258 259 263 266 268 270 273 275 276 278 279
## [109] 281 282 285 286 289 293 294 296 297 300 304 305 306 311 312 313 316 317
## [127] 320 321 322 324 326 327
which(wf_dt$attentioncheck_b == "5", arr.ind = TRUE)
## integer(0)
which(wf_dt$attentioncheck_b == "6", arr.ind = TRUE)
## integer(0)
which(wf_dt$attentioncheck_b == "7", arr.ind = TRUE)
## integer(0)
# Drop 9 participants flagged during the attention-check screen.
# NOTE(review): excluding by hard-coded row position is fragile — it silently
# breaks if the CSV is ever re-exported in a different row order; filtering on
# ResponseId/ParticipantID would be robust.
wf_dt2 <- wf_dt[-c(61, 81, 88, 82, 15, 301, 315, 323, 254),] # only lost 9 people
# Centered and standardized versions of the combined discrepancy score.
wf_dt2$bias_discrepancy_centered <- scale(wf_dt2$bias_discrepancy, center = TRUE, scale = FALSE) # Centering
wf_dt2$bias_discrepancy_Z <- scale(wf_dt2$bias_discrepancy, center = TRUE, scale = TRUE) # Z-score
# Condition code 0-3 built by summing four mutually exclusive indicator
# columns. NOTE(review): with na.rm = TRUE a row where all four indicators are
# NA also yields 0, which collides with a real condition code — confirm every
# row has exactly one indicator set.
wf_dt2 <- wf_dt2 %>%
  mutate(
    condition = rowSums(select(., nobias_white, nobias_black, bias_white, bias_black), na.rm = TRUE)
  )
# Printed only (result not assigned): confirms condition takes exactly the
# four design cells 0-3.
as.factor(wf_dt2$condition)
##   [1] 0 3 0 1 2 0 1 0 2 1 3 1 0 1 0 3 1 0 2 1 1 0 3 1 2 0 1 3 3 0 1 3 2 3 0 3 2
##  [38] 0 3 1 0 2 2 3 0 0 2 2 2 0 3 0 1 1 3 1 3 3 0 2 3 2 0 1 0 1 2 0 2 0 2 0 3 1
##  [75] 1 3 1 2 2 0 0 2 3 2 0 2 3 3 3 1 1 3 2 1 1 0 3 0 3 3 1 3 3 0 0 1 3 0 0 1 1
## [112] 0 2 1 0 2 2 0 2 3 0 1 3 3 0 3 1 0 2 3 2 2 1 2 2 1 1 2 2 0 2 3 0 3 2 2 1 0
## [149] 3 0 3 3 1 0 3 2 0 2 2 0 1 0 1 1 0 1 0 0 1 1 3 1 0 0 2 1 3 2 2 2 2 3 2 3 3
## [186] 0 1 3 1 3 2 0 3 0 3 2 2 3 2 3 0 3 1 1 0 0 1 0 0 0 0 3 1 1 2 1 0 0 3 2 2 2
## [223] 1 0 0 2 3 2 0 1 3 1 3 2 0 0 1 2 0 3 0 1 2 1 0 2 3 3 2 1 3 2 3 0 2 1 3 0 1
## [260] 3 0 2 1 2 0 3 2 1 3 2 0 2 3 1 2 3 0 1 2 3 1 0 2 1 0 0 2 3 0 3 2 0 1 2 0 0
## [297] 2 3 3 0 1 0 1 3 3 2 1 2 3 1 1 2 2 3 2 1 2 3
## Levels: 0 1 2 3
# Decompose the 2x2 design into its two factors:
#   bias:       conditions 0/1 -> 0, conditions 2/3 -> 1
#   brand_race: conditions 0/2 -> 0, conditions 1/3 -> 1
# NOTE(review): which indicator column corresponds to which cell is inferred
# from the condition sums above — TODO confirm the 0/1 labels against the
# Qualtrics design before interpreting main effects.
wf_dt2 <- wf_dt2 %>%
  mutate(
    bias = case_when(
      condition %in% c(0, 1) ~ 0,
      condition %in% c(2, 3) ~ 1,
      TRUE ~ NA_integer_
    ),
    brand_race = case_when(
      condition %in% c(0, 2) ~ 0,
      condition %in% c(1, 3) ~ 1,
      TRUE ~ NA_integer_
    )
  )
# Composite DVs: each participant answered either the "rl_" or the "rr_"
# version of an item, so summing the pair with na.rm = TRUE collapses the two
# columns into a single score. A mean-centered and a z-scored version is
# added alongside each raw composite.
wf_dt2 <- mutate(
  wf_dt2,
  shop_intentions = rowSums(across(c(rl_shop_intentions, rr_shop_intentions)), na.rm = TRUE)
)
# Centering and z-score for "shop_intentions"
wf_dt2$shop_intentions_centered <- scale(wf_dt2$shop_intentions, center = TRUE, scale = FALSE)
wf_dt2$shop_intentions_Z <- scale(wf_dt2$shop_intentions, center = TRUE, scale = TRUE)
wf_dt2 <- mutate(
  wf_dt2,
  purchase = rowSums(across(c(rl_purchase, rr_purchase)), na.rm = TRUE)
)
# Centering and z-score for "purchase"
wf_dt2$purchase_centered <- scale(wf_dt2$purchase, center = TRUE, scale = FALSE)
wf_dt2$purchase_Z <- scale(wf_dt2$purchase, center = TRUE, scale = TRUE)
wf_dt2 <- mutate(
  wf_dt2,
  wom = rowSums(across(c(rl_wom, rr_wom)), na.rm = TRUE)
)
# Centering and z-score for "wom"
wf_dt2$wom_centered <- scale(wf_dt2$wom, center = TRUE, scale = FALSE)
wf_dt2$wom_Z <- scale(wf_dt2$wom, center = TRUE, scale = TRUE)

**Demographics**

# Demographic breakdowns as percentages of ALL rows in wf_dt2. The
# denominator is the full row count (NAs included), so each column of
# percentages need not sum to 100.
n_total <- nrow(wf_dt2)
race_proportions <- table(wf_dt2$race) / n_total # creating race table
(race_percentages <- 100 * race_proportions) # convert proportions to percentages
## 
##          1          3          4          6          7         14         15 
## 86.7924528  0.3144654  0.3144654  0.3144654  0.6289308  0.3144654  0.3144654 
##         16 
##  0.9433962
gender_proportions <- table(wf_dt2$gender) / n_total # creating gender table
(gender_percentages <- 100 * gender_proportions) # convert proportions to percentages
## 
##          1          2          3          4          5 
## 41.8238994 45.2830189  2.2012579  0.3144654  0.3144654
political_proportions <- table(wf_dt2$polit_affil) / n_total # creating political table
(political_percentages <- 100 * political_proportions) # convert proportions to percentages
## 
##         1         2         3         4         5 
## 17.610063 47.169811 22.012579  1.886792  1.257862
education_proportions <- table(wf_dt2$education) / n_total # creating education table
(education_percentages <- 100 * education_proportions) # convert proportions to percentages
## 
##          1          2          3          4          5          6          7 
##  0.6289308 10.6918239 18.8679245  9.7484277 36.4779874 12.8930818  0.6289308
iatprev_proportions <- table(wf_dt2$iat_prev) / n_total # creating IAT previously table
(iatprev_percentages <- 100 * iatprev_proportions) # convert proportions to percentages
## 
##       21       22 
## 59.74843 30.18868
iatracial_proportions <- table(wf_dt2$iat_racial) / n_total # creating IAT racial table
(iatracial_percentages <- 100 * iatracial_proportions) # convert proportions to percentages
## 
##        21        22 
##  6.289308 23.899371

1. Defensiveness

# Item-level diagnostics for the four defensiveness items.
# NOTE(review): these bare names resolve through attach(wf_dt), so the
# diagnostics run on the FULL pre-exclusion sample (n = 327), while the
# composite below is built from wf_dt2 — confirm this is intentional.
hildebrand.rule(credibility, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  -0.115851 No Skew
hildebrand.rule(objective, na.rm = TRUE)
##   Group       Ratio    Skew
## 1   xx  0.008063028 No Skew
hildebrand.rule(valid, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.1385317 No Skew
hildebrand.rule(useful, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.2647419 Negative Skew
sd(credibility, na.rm = TRUE)
## [1] 1.708679
sd(objective, na.rm = TRUE)
## [1] 1.698944
sd(valid, na.rm = TRUE)
## [1] 1.705757
sd(useful, na.rm = TRUE)
## [1] 1.759273
hist(credibility)

hist(objective)

hist(valid)

hist(useful)

# Columns 85:88 are credibility, objective, valid, useful (per colnames()).
defensive <- select(wf_dt2, 85:88)
# Cronbach's alpha for the 4-item defensiveness scale.
psych::alpha(defensive)
## 
## Reliability analysis   
## Call: psych::alpha(x = defensive)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N    ase mean  sd median_r
##       0.92      0.92    0.91      0.74  11 0.0078  3.8 1.6     0.73
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt      0.9  0.92  0.93
## Duhachek   0.9  0.92  0.93
## 
##  Reliability if an item is dropped:
##             raw_alpha std.alpha G6(smc) average_r  S/N alpha se  var.r med.r
## credibility      0.87      0.87    0.84      0.70  6.9   0.0126 0.0144  0.67
## objective        0.94      0.94    0.91      0.83 14.7   0.0063 0.0022  0.83
## valid            0.86      0.86    0.82      0.68  6.3   0.0135 0.0094  0.65
## useful           0.89      0.89    0.87      0.73  8.3   0.0109 0.0155  0.67
## 
##  Item statistics 
##               n raw.r std.r r.cor r.drop mean  sd
## credibility 284  0.93  0.93  0.91   0.86  3.8 1.7
## objective   283  0.81  0.82  0.70   0.68  4.0 1.7
## valid       283  0.94  0.94  0.94   0.90  3.8 1.7
## useful      283  0.90  0.90  0.85   0.81  3.5 1.8
## 
## Non missing response frequency for each item
##                1    2    3    4    5    6    7 miss
## credibility 0.15 0.11 0.10 0.27 0.23 0.08 0.06 0.11
## objective   0.13 0.06 0.12 0.30 0.17 0.14 0.07 0.11
## valid       0.15 0.10 0.17 0.23 0.19 0.11 0.05 0.11
## useful      0.18 0.15 0.15 0.21 0.19 0.07 0.06 0.11
# Mean of the four items. NOTE(review): no na.rm, so any participant missing
# even one item gets NA for the composite (listwise at the scale level).
wf_dt2$defensive <- rowMeans(wf_dt2[,85:88]) # creating defensiveness variable
myscale <- 1:7 #defining scale to reverse defensivness variable
# Reverse-score: min + max - x = 8 - x on a 1-7 scale.
wf_dt2 <- wf_dt2 %>%
  mutate(defensive_reverse = min(myscale) - defensive + max(myscale)) # reversing defensivness scale
wf_dt2$defensive_reverse_centered <- scale(wf_dt2$defensive_reverse, center = TRUE, scale = FALSE) #centering the variable
wf_dt2$defensive_reverse_Z <- scale(wf_dt2$defensive_reverse, center = TRUE, scale = TRUE) # z score defensiveness 
# Wrapping the assignment in () prints the value as well as storing it.
(def_r_mean =  mean(wf_dt2$defensive_reverse, na.rm = TRUE)) # mean defensivness
## [1] 4.241906
(def_r_sd =  sd(wf_dt2$defensive_reverse, na.rm = TRUE)) # sd defensiveness
## [1] 1.548895
hist(wf_dt2$defensive_reverse_Z) # histogram of defensive z scores

# 2x2 factorial ANOVA: defensiveness (reverse-scored) by bias feedback and
# brand race. NOTE(review): aov() reports sequential (Type I) sums of
# squares; with unequal cell sizes (40 rows dropped for missingness) the
# main-effect tests depend on factor order — consider car::Anova(type = 3)
# to corroborate.
summary((anova_defensive_reverse <- aov(defensive_reverse ~ as.factor(bias)*as.factor(brand_race), data = wf_dt2)))
##                                        Df Sum Sq Mean Sq F value      Pr(>F)
## as.factor(bias)                         1   56.5   56.49  25.670 0.000000746
## as.factor(brand_race)                   1    0.5    0.53   0.243       0.622
## as.factor(bias):as.factor(brand_race)   1    4.5    4.50   2.047       0.154
## Residuals                             274  603.0    2.20                    
##                                          
## as.factor(bias)                       ***
## as.factor(brand_race)                    
## as.factor(bias):as.factor(brand_race)    
## Residuals                                
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 40 observations deleted due to missingness
# Tukey HSD post hoc comparisons for all main-effect and cell contrasts.
(tukey_posthoc <- TukeyHSD(anova_defensive_reverse))
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = defensive_reverse ~ as.factor(bias) * as.factor(brand_race), data = wf_dt2)
## 
## $`as.factor(bias)`
##          diff       lwr     upr     p adj
## 1-0 0.9044271 0.5530046 1.25585 0.0000007
## 
## $`as.factor(brand_race)`
##           diff        lwr       upr     p adj
## 1-0 0.08766191 -0.2626675 0.4379914 0.6226813
## 
## $`as.factor(bias):as.factor(brand_race)`
##               diff            lwr        upr     p adj
## 1:0-0:0  0.6523987 -0.00004964437  1.3048470 0.0500256
## 0:1-0:0 -0.1879277 -0.86613489865  0.4902796 0.8905901
## 1:1-0:0  0.9755824  0.31120396399  1.6399609 0.0010360
## 0:1-1:0 -0.8403263 -1.48166470937 -0.1989880 0.0044803
## 1:1-1:0  0.3231838 -0.30351282130  0.9498803 0.5427063
## 1:1-0:1  1.1635101  0.51003871844  1.8169815 0.0000378

2. Bias Awareness

# Item-level diagnostics for the four Bias Awareness items.
# NOTE(review): bare names resolve through attach(wf_dt) — full pre-exclusion
# sample, whereas the composite below uses wf_dt2; confirm intentional.
hildebrand.rule(BA_1, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  0.1849701 No Skew
hildebrand.rule(BA_2, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  0.1529581 No Skew
hildebrand.rule(BA_3, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  -0.148806 No Skew
hildebrand.rule(BA_4, na.rm = TRUE)
##   Group        Ratio    Skew
## 1   xx  -0.006890947 No Skew
sd(BA_1, na.rm = TRUE)
## [1] 1.807888
sd(BA_2, na.rm = TRUE)
## [1] 1.786841
sd(BA_3, na.rm = TRUE)
## [1] 1.815091
sd(BA_4, na.rm = TRUE)
## [1] 1.866469
hist(BA_1)

hist(BA_2)

hist(BA_3)

hist(BA_4)

# Columns 14:17 are BA_1..BA_4 (per colnames()).
bias_aware <- select(wf_dt2, 14:17)
# Cronbach's alpha for the 4-item bias-awareness scale.
psych::alpha(bias_aware)
## 
## Reliability analysis   
## Call: psych::alpha(x = bias_aware)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.82      0.82    0.79      0.53 4.5 0.017  3.6 1.5     0.54
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.78  0.82  0.85
## Duhachek  0.78  0.82  0.85
## 
##  Reliability if an item is dropped:
##      raw_alpha std.alpha G6(smc) average_r S/N alpha se  var.r med.r
## BA_1      0.81      0.81    0.75      0.59 4.3    0.019 0.0084  0.55
## BA_2      0.75      0.75    0.69      0.50 3.0    0.024 0.0123  0.55
## BA_3      0.71      0.71    0.63      0.45 2.5    0.028 0.0053  0.46
## BA_4      0.80      0.80    0.75      0.58 4.1    0.019 0.0141  0.58
## 
##  Item statistics 
##        n raw.r std.r r.cor r.drop mean  sd
## BA_1 302  0.75  0.75  0.61   0.55  3.3 1.8
## BA_2 302  0.83  0.83  0.76   0.68  3.3 1.8
## BA_3 302  0.88  0.88  0.85   0.76  3.7 1.8
## BA_4 302  0.77  0.76  0.63   0.57  4.0 1.9
## 
## Non missing response frequency for each item
##         1    2    3    4    5    6    7 miss
## BA_1 0.19 0.25 0.13 0.06 0.24 0.11 0.02 0.05
## BA_2 0.20 0.26 0.13 0.06 0.25 0.08 0.03 0.05
## BA_3 0.15 0.19 0.12 0.09 0.30 0.12 0.04 0.05
## BA_4 0.12 0.18 0.12 0.09 0.25 0.18 0.07 0.05
# Mean of BA_1..BA_4. NOTE(review): no na.rm — any missing item yields an NA
# composite, matching the defensiveness composite above.
wf_dt2$bias_aware <- rowMeans(wf_dt2[,14:17])
wf_dt2$bias_aware_center <- scale(wf_dt2$bias_aware, center = TRUE, scale = FALSE) #centering the variable
wf_dt2$bias_aware_Z <- scale(wf_dt2$bias_aware, center = TRUE, scale = TRUE) #Z scores 
# Parenthesized assignment prints the value as well as storing it.
(BA_mean =  mean(wf_dt2$bias_aware, na.rm = TRUE)) # mean of bias awareness
## [1] 3.563742
(BA_sd =  sd(wf_dt2$bias_aware, na.rm = TRUE)) # sd of bias awareness
## [1] 1.463663
hist(wf_dt2$bias_aware_Z)

# One-way ANOVA: bias awareness across the four design cells (manipulation
# check; no cell differences expected since BA was measured pre-manipulation).
summary((anova_bias_aware <- aov(bias_aware ~ as.factor(condition), data = wf_dt2)))
##                       Df Sum Sq Mean Sq F value Pr(>F)
## as.factor(condition)   3    5.1   1.714   0.798  0.496
## Residuals            298  639.7   2.147               
## 16 observations deleted due to missingness
# Run Tukey post hoc test.
# NOTE(review): this overwrites the earlier tukey_posthoc object from the
# defensiveness ANOVA — use a distinct name if both are needed later.
(tukey_posthoc <- TukeyHSD(anova_bias_aware))
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = bias_aware ~ as.factor(condition), data = wf_dt2)
## 
## $`as.factor(condition)`
##            diff        lwr       upr     p adj
## 1-0 -0.29681467 -0.9279424 0.3343131 0.6177784
## 2-0  0.01830357 -0.6012124 0.6378196 0.9998402
## 3-0 -0.18177656 -0.8049880 0.4414349 0.8750921
## 2-1  0.31511824 -0.2954025 0.9256390 0.5423101
## 3-1  0.11503812 -0.4992322 0.7293085 0.9626422
## 3-2 -0.20008013 -0.8024138 0.4022536 0.8263152

3. Emotions

# Skew diagnostics (Hildebrand's rule) for each BMIS mood item at time 1
# (_1) and time 2 (_2). Bare names resolve through attach(wf_dt), i.e. the
# full pre-exclusion sample — NOTE(review): confirm intentional.
hildebrand.rule(BMIS_lively_1, na.rm = TRUE)
##   Group    Ratio          Skew
## 1   xx  0.290114 Positive Skew
hildebrand.rule(BMIS_lively_2, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.3121008 Positive Skew
hildebrand.rule(BMIS_happy_1, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.2571164 Negative Skew
hildebrand.rule(BMIS_happy_2, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.4123413 Negative Skew
hildebrand.rule(BMIS_sad_1, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.0779093 No Skew
hildebrand.rule(BMIS_sad_2, na.rm = TRUE)
##   Group       Ratio    Skew
## 1   xx  -0.01324874 No Skew
hildebrand.rule(BMIS_shame_1, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7093866 Positive Skew
hildebrand.rule(BMIS_shame_2, na.rm = TRUE)
##   Group    Ratio          Skew
## 1   xx  0.803145 Positive Skew
hildebrand.rule(BMIS_guilt_1, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7735714 Positive Skew
hildebrand.rule(BMIS_guilt_2, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7938481 Positive Skew
hildebrand.rule(BMIS_tired_1, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  -0.299749 Negative Skew
hildebrand.rule(BMIS_tired_2, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.3492858 Negative Skew
hildebrand.rule(BMIS_caring_1, na.rm = TRUE)
##   Group       Ratio    Skew
## 1   xx  -0.07747474 No Skew
hildebrand.rule(BMIS_caring_2, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.1813282 No Skew
hildebrand.rule(BMIS_content_1, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.1684255 No Skew
hildebrand.rule(BMIS_content_2, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.2072466 Negative Skew
hildebrand.rule(BMIS_gloomy_1, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.1031091 No Skew
hildebrand.rule(BMIS_gloomy_2, na.rm = TRUE)
##   Group       Ratio    Skew
## 1   xx  -0.09969695 No Skew
hildebrand.rule(BMIS_jittery_1, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7373568 Positive Skew
hildebrand.rule(BMIS_jittery_2, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7611705 Positive Skew
hildebrand.rule(BMIS_drowsy_1, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  0.1266293 No Skew
hildebrand.rule(BMIS_drowsy_2, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  0.1536428 No Skew
hildebrand.rule(BMIS_nervous_1, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.1351261 No Skew
hildebrand.rule(BMIS_nervous_2, na.rm = TRUE)
##   Group      Ratio    Skew
## 1   xx  -0.1633531 No Skew
hildebrand.rule(BMIS_calm_1, na.rm = TRUE)
##   Group     Ratio    Skew
## 1   xx  0.1559281 No Skew
hildebrand.rule(BMIS_calm_2, na.rm = TRUE)
##   Group       Ratio    Skew
## 1   xx  -0.04192267 No Skew
hildebrand.rule(BMIS_jittery_1, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7373568 Positive Skew
hildebrand.rule(BMIS_jittery_2, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7611705 Positive Skew
hildebrand.rule(BMIS_loving_1, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.2786378 Negative Skew
hildebrand.rule(BMIS_loving_2, na.rm = TRUE)
##   Group      Ratio          Skew
## 1   xx  -0.3080362 Negative Skew
hildebrand.rule(BMIS_fedup_1, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.8267106 Positive Skew
hildebrand.rule(BMIS_fedup_2, na.rm = TRUE)
##   Group     Ratio          Skew
## 1   xx  0.7907416 Positive Skew
# Standard deviation of every BMIS mood item at T1 and T2
# (the duplicated jittery calls from the original are removed).
bmis_items <- c("lively", "happy", "sad", "shame", "guilt", "tired",
                "caring", "content", "gloomy", "jittery", "drowsy",
                "nervous", "calm", "loving", "fedup")
# Interleave time points so the order matches the original: item_1, item_2, ...
bmis_item_names <- paste0("BMIS_", rep(bmis_items, each = 2), "_", 1:2)
(bmis_item_sds <- vapply(bmis_item_names,
                         function(nm) sd(get(nm), na.rm = TRUE),
                         numeric(1)))
# Histogram of every BMIS mood item at T1 and T2.
# hist() has no na.rm argument -- NAs are dropped automatically, and passing
# na.rm = TRUE only produced the '"na.rm" is not a graphical parameter'
# warnings visible in the original transcript, so it is removed.
# The duplicated jittery plots are also dropped.
bmis_items <- c("lively", "happy", "sad", "shame", "guilt", "tired",
                "caring", "content", "gloomy", "jittery", "drowsy",
                "nervous", "calm", "loving", "fedup")
for (item in bmis_items) {
  for (tp in 1:2) {
    nm <- paste0("BMIS_", item, "_", tp)
    # title/axis label set explicitly because the vector is fetched via get()
    hist(get(nm), main = paste("Histogram of", nm), xlab = nm)
  }
}

# Mean-centered and Z-scored copies of the T1 and T2 guilt, shame, and sad
# items; the loop replaces twelve copy-pasted scale() calls and creates the
# same columns, with the same values, in the same order as the original.
for (v in c("BMIS_guilt_1", "BMIS_shame_1", "BMIS_sad_1",
            "BMIS_guilt_2", "BMIS_shame_2", "BMIS_sad_2")) {
  wf_dt2[[paste0(v, "_center")]] <- scale(wf_dt2[[v]], center = TRUE, scale = FALSE) # centered
  wf_dt2[[paste0(v, "_Z")]]      <- scale(wf_dt2[[v]], center = TRUE, scale = TRUE)  # Z score
}
# NOTE(review): wf_dt2 has no columns named BMIS_guilt or BMIS_shame (see the
# "Unknown or uninitialised column" warnings below), so all four summaries
# return NA. The intended columns are presumably BMIS_guilt_1/_2 and
# BMIS_shame_1/_2, or a composite that was never created -- TODO: fix the
# column names or build the composites before these summaries.
(guilty_mean = mean(wf_dt2$BMIS_guilt, na.rm = TRUE))
## Warning: Unknown or uninitialised column: `BMIS_guilt`.
## Warning in mean.default(wf_dt2$BMIS_guilt, na.rm = TRUE): argument is not
## numeric or logical: returning NA
## [1] NA
(guilty_sd = sd(wf_dt2$BMIS_guilt, na.rm = TRUE))
## Warning: Unknown or uninitialised column: `BMIS_guilt`.
## [1] NA
(shame_mean = mean(wf_dt2$BMIS_shame, na.rm = TRUE))
## Warning: Unknown or uninitialised column: `BMIS_shame`.
## Warning in mean.default(wf_dt2$BMIS_shame, na.rm = TRUE): argument is not
## numeric or logical: returning NA
## [1] NA
(shame_sd = sd(wf_dt2$BMIS_shame, na.rm = TRUE))
## Warning: Unknown or uninitialised column: `BMIS_shame`.
## [1] NA
guilt_shame_sad <- select(wf_dt2, BMIS_sad_2, BMIS_shame_2, BMIS_guilt_2) # the three T2 distress items that form the composite
psych::alpha(guilt_shame_sad) # internal consistency of the 3-item composite
## 
## Reliability analysis   
## Call: psych::alpha(x = guilt_shame_sad)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N  ase mean  sd median_r
##        0.9       0.9     0.9      0.76 9.5 0.01  1.8 0.9     0.68
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.88   0.9  0.92
## Duhachek  0.88   0.9  0.92
## 
##  Reliability if an item is dropped:
##              raw_alpha std.alpha G6(smc) average_r  S/N alpha se var.r med.r
## BMIS_sad_2        0.97      0.97    0.94      0.94 29.3   0.0037    NA  0.94
## BMIS_shame_2      0.80      0.80    0.66      0.66  4.0   0.0226    NA  0.66
## BMIS_guilt_2      0.81      0.81    0.68      0.68  4.2   0.0216    NA  0.68
## 
##  Item statistics 
##                n raw.r std.r r.cor r.drop mean   sd
## BMIS_sad_2   286  0.86  0.85  0.69   0.68  2.0 1.02
## BMIS_shame_2 285  0.95  0.95  0.96   0.88  1.8 0.96
## BMIS_guilt_2 286  0.94  0.95  0.95   0.87  1.8 0.96
## 
## Non missing response frequency for each item
##                 1    2    3    4 miss
## BMIS_sad_2   0.44 0.22 0.25 0.09  0.1
## BMIS_shame_2 0.54 0.20 0.20 0.06  0.1
## BMIS_guilt_2 0.55 0.20 0.20 0.06  0.1
# NOTE(review): rowMeans() defaults to na.rm = FALSE, so a respondent missing
# any one of the three items gets NA on the composite -- confirm intended.
wf_dt2$guilt_shame_sad <- rowMeans(guilt_shame_sad)
wf_dt2$guilt_shame_sad_center <- scale(wf_dt2$guilt_shame_sad, center = TRUE, scale = FALSE) # mean-centered composite
wf_dt2$guilt_shame_sad_Z <- scale(wf_dt2$guilt_shame_sad, center = TRUE, scale = TRUE) # Z-scored composite
# Shame change score: T2 minus T1 (both Z-scored); positive = shame increased.
wf_dt2 <- wf_dt2 %>% mutate(shame_discrepancy = BMIS_shame_2_Z - BMIS_shame_1_Z)

# Post-manipulation (T2) shame by bias condition ------------------------------
(t_test_BMIS_shame_2 <- t.test(BMIS_shame_2 ~ bias, var.equal = TRUE, data = wf_dt2))
# Cohen's d for T2 shame by bias condition
(cohen_d_BMIS_shame_2 <- cohensD(BMIS_shame_2 ~ bias, data = wf_dt2))
# Per-condition SDs of T2 shame
(sd_BMIS_shame_2_by_bias <- aggregate(BMIS_shame_2 ~ bias, data = wf_dt2, FUN = sd))

# Baseline (T1) shame by bias condition ---------------------------------------
# NOTE(review): the original stored these T1 results under the *_2 names,
# silently overwriting the T2 objects created just above; renamed to *_1.
(t_test_BMIS_shame_1 <- t.test(BMIS_shame_1 ~ bias, var.equal = TRUE, data = wf_dt2))
# Cohen's d for T1 shame by bias condition
(cohen_d_BMIS_shame_1 <- cohensD(BMIS_shame_1 ~ bias, data = wf_dt2))
# Per-condition SDs of T1 shame
(sd_BMIS_shame_1_by_bias <- aggregate(BMIS_shame_1 ~ bias, data = wf_dt2, FUN = sd))
# 2x2 factorial ANOVA: T2 self-esteem (SE_2) by bias and brand_race
# (original comment said "one-way", but this model crosses two factors)
summary((anova_SE2 <- aov(SE_2 ~ as.factor(bias)*as.factor(brand_race), data = wf_dt2)))
##                                        Df Sum Sq Mean Sq F value Pr(>F)
## as.factor(bias)                         1    1.5  1.4913   0.850  0.357
## as.factor(brand_race)                   1    1.9  1.9410   1.107  0.294
## as.factor(bias):as.factor(brand_race)   1    0.5  0.5037   0.287  0.592
## Residuals                             282  494.7  1.7541               
## 32 observations deleted due to missingness
# Tukey HSD post hoc comparisons for both main effects and the interaction cells
(tukey_SE2 <- TukeyHSD(anova_SE2))
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = SE_2 ~ as.factor(bias) * as.factor(brand_race), data = wf_dt2)
## 
## $`as.factor(bias)`
##           diff       lwr       upr    p adj
## 1-0 -0.1447737 -0.453845 0.1642976 0.357299
## 
## $`as.factor(brand_race)`
##          diff        lwr       upr     p adj
## 1-0 0.1646979 -0.1436244 0.4730201 0.2939405
## 
## $`as.factor(bias):as.factor(brand_race)`
##                diff        lwr       upr     p adj
## 1:0-0:0 -0.22516026 -0.8024587 0.3521382 0.7448843
## 0:1-0:0  0.07472826 -0.5192969 0.6687534 0.9881077
## 1:1-0:0  0.01791667 -0.5645624 0.6003957 0.9998195
## 0:1-1:0  0.29988852 -0.2658039 0.8655809 0.5192046
## 1:1-1:0  0.24307692 -0.3104787 0.7966326 0.6682772
## 1:1-0:1 -0.05681159 -0.6277899 0.5141667 0.9940277
# One-way ANOVA: SE_2 across the four conditions.
# NOTE(review): this reuses the names anova_SE2 / tukey_SE2 and so overwrites
# the factorial-model objects fitted just above -- rename one pair if both
# models are needed later.
summary((anova_SE2 <- aov(SE_2 ~ as.factor(condition), data = wf_dt2)))
##                       Df Sum Sq Mean Sq F value Pr(>F)
## as.factor(condition)   3    3.9   1.312   0.748  0.524
## Residuals            282  494.7   1.754               
## 32 observations deleted due to missingness
# Tukey HSD post hoc pairwise comparisons of condition means
(tukey_SE2 <- TukeyHSD(anova_SE2))
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = SE_2 ~ as.factor(condition), data = wf_dt2)
## 
## $`as.factor(condition)`
##            diff        lwr       upr     p adj
## 1-0  0.07472826 -0.5192969 0.6687534 0.9881077
## 2-0 -0.22516026 -0.8024587 0.3521382 0.7448843
## 3-0  0.01791667 -0.5645624 0.6003957 0.9998195
## 2-1 -0.29988852 -0.8655809 0.2658039 0.5192046
## 3-1 -0.05681159 -0.6277899 0.5141667 0.9940277
## 3-2  0.24307692 -0.3104787 0.7966326 0.6682772
# Independent-samples t-test for T2 self-esteem (SE_2) by bias condition.
# NOTE(review): the original stored these SE_2 results under the names
# t_test_BMIS_shame_2 / cohen_d_BMIS_shame_2 / sd_by_condition (copy-paste),
# clobbering the shame results; renamed to SE_2-specific objects.
(t_test_SE_2 <- t.test(SE_2 ~ bias, var.equal = TRUE, data = wf_dt2))
# Cohen's d for SE_2 by bias condition
(cohen_d_SE_2 <- cohensD(SE_2 ~ bias, data = wf_dt2))
# Per-condition SDs of SE_2
(sd_SE_2_by_bias <- aggregate(SE_2 ~ bias, data = wf_dt2, FUN = sd))

wf_dt2$SE_2_Z <- scale(wf_dt2$SE_2, center = TRUE, scale = TRUE) # Z-scored T2 self-esteem
# NOTE(review): `SE1` (no underscore) is inconsistent with `SE_2` -- confirm
# the baseline column really is named SE1 rather than SE_1.
wf_dt2$SE_1_Z <- scale(wf_dt2$SE1, center = TRUE, scale = TRUE) # Z-scored T1 self-esteem
# Self-esteem change score; positive = increase, negative = decrease
wf_dt2 <- wf_dt2 %>% mutate(SE_change = SE_2_Z - SE_1_Z)

4. Model Regressions

# 2x2 factorial ANOVA: purchase intent by bias and brand_race
# (original comment said "one-way"; this model crosses two factors and
# tests their interaction)
summary((anova_purchase <- aov(purchase ~ as.factor(bias)*as.factor(brand_race), data = wf_dt2)))
##                                        Df Sum Sq Mean Sq F value    Pr(>F)    
## as.factor(bias)                         1   32.9   32.91   8.053   0.00484 ** 
## as.factor(brand_race)                   1   81.7   81.67  19.985 0.0000109 ***
## as.factor(bias):as.factor(brand_race)   1   18.4   18.44   4.511   0.03446 *  
## Residuals                             314 1283.2    4.09                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Tukey HSD post hoc comparisons for both main effects and the interaction cells
(tukey_purchase <- TukeyHSD(anova_purchase))
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = purchase ~ as.factor(bias) * as.factor(brand_race), data = wf_dt2)
## 
## $`as.factor(bias)`
##          diff       lwr     upr     p adj
## 1-0 0.6434335 0.1973273 1.08954 0.0048373
## 
## $`as.factor(brand_race)`
##         diff      lwr      upr    p adj
## 1-0 1.014068 0.567538 1.460599 0.000011
## 
## $`as.factor(bias):as.factor(brand_race)`
##              diff        lwr       upr     p adj
## 1:0-0:0 1.0729651  0.2619190 1.8840112 0.0039778
## 0:1-0:0 1.4956003  0.6676950 2.3235055 0.0000270
## 1:1-0:0 1.6040549  0.7876396 2.4204701 0.0000040
## 0:1-1:0 0.4226351 -0.4195067 1.2647769 0.5660145
## 1:1-1:0 0.5310897 -0.2997590 1.3619385 0.3516638
## 1:1-0:1 0.1084546 -0.7388593 0.9557685 0.9875159

5. Shame Regressions

# T2 shame ~ bias awareness x bias condition.
summary(lm(BMIS_shame_2_Z ~ bias_aware_Z*as.factor(bias), wf_dt2)) # interaction with bias condition is significant (p = .007); bias_aware simple effect in the control group is not (p = .42)
## 
## Call:
## lm(formula = BMIS_shame_2_Z ~ bias_aware_Z * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.8994 -0.5751 -0.4242  0.5885  2.5843 
## 
## Coefficients:
##                               Estimate Std. Error t value   Pr(>|t|)    
## (Intercept)                   -0.28392    0.08072  -3.518   0.000508 ***
## bias_aware_Z                   0.06689    0.08350   0.801   0.423730    
## as.factor(bias)1               0.51996    0.11016   4.720 0.00000372 ***
## bias_aware_Z:as.factor(bias)1  0.29877    0.11051   2.704   0.007279 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9269 on 281 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.1499, Adjusted R-squared:  0.1408 
## F-statistic: 16.51 on 3 and 281 DF,  p-value: 0.0000000006535
# T2 shame ~ defensiveness (reverse-scored) x bias condition.
summary(lm(BMIS_shame_2_Z ~ defensive_reverse_Z*as.factor(bias), wf_dt2)) # defensiveness alone not significant (p = .22); its interaction with bias condition is (p = .017)
## 
## Call:
## lm(formula = BMIS_shame_2_Z ~ defensive_reverse_Z * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.4913 -0.7599 -0.3942  0.7591  2.5916 
## 
## Coefficients:
##                                      Estimate Std. Error t value   Pr(>|t|)    
## (Intercept)                          -0.24669    0.08990  -2.744    0.00647 ** 
## defensive_reverse_Z                   0.11327    0.09187   1.233    0.21865    
## as.factor(bias)1                      0.55590    0.12125   4.585 0.00000693 ***
## defensive_reverse_Z:as.factor(bias)1 -0.29353    0.12194  -2.407    0.01674 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9609 on 273 degrees of freedom
##   (41 observations deleted due to missingness)
## Multiple R-squared:  0.09452,    Adjusted R-squared:  0.08457 
## F-statistic: 9.499 on 3 and 273 DF,  p-value: 0.000005463
# Stability check: does baseline shame predict T2 shame?
summary(lm(BMIS_shame_2_Z ~ BMIS_shame_1_Z, wf_dt2)) # T1 shame strongly predicts T2 shame (R-squared ~ .49)
## 
## Call:
## lm(formula = BMIS_shame_2_Z ~ BMIS_shame_1_Z, data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.9437 -0.3044 -0.3044  0.1321  2.8093 
## 
## Coefficients:
##                 Estimate Std. Error t value            Pr(>|t|)    
## (Intercept)    -0.003743   0.042316  -0.088                0.93    
## BMIS_shame_1_Z  0.700904   0.042380  16.538 <0.0000000000000002 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7144 on 283 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.4915, Adjusted R-squared:  0.4897 
## F-statistic: 273.5 on 1 and 283 DF,  p-value: < 0.00000000000000022
# T2 shame ~ bias discrepancy x bias awareness.
# NOTE(review): the original comment claimed bias discrepancy "does interact
# with bias aware", but in this model the interaction is non-significant
# (p = .366) -- both main effects are significant, though. Confirm which model
# the interaction claim refers to. (Typos in the original comment fixed.)
summary(lm(BMIS_shame_2_Z ~ bias_discrepancy_Z*bias_aware_Z, wf_dt2)) # bias discrepancy predicts shame; it is significant when bias condition is in the model, but they do not interact (close at .24 with a positive interaction)
## 
## Call:
## lm(formula = BMIS_shame_2_Z ~ bias_discrepancy_Z * bias_aware_Z, 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.6095 -0.6912 -0.3006  0.6511  2.6177 
## 
## Coefficients:
##                                  Estimate Std. Error t value   Pr(>|t|)    
## (Intercept)                      0.004705   0.055763   0.084      0.933    
## bias_discrepancy_Z              -0.257326   0.056514  -4.553 0.00000788 ***
## bias_aware_Z                     0.267658   0.055618   4.812 0.00000244 ***
## bias_discrepancy_Z:bias_aware_Z -0.054683   0.060351  -0.906      0.366    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9358 on 281 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.1335, Adjusted R-squared:  0.1242 
## F-statistic: 14.43 on 3 and 281 DF,  p-value: 0.000000009085
# Shame change score ~ bias discrepancy.
summary(lm(shame_discrepancy ~ bias_discrepancy_Z, wf_dt2)) # larger bias discrepancy was associated with a greater change in shame; does not interact with as.factor(bias)
## 
## Call:
## lm(formula = shame_discrepancy ~ bias_discrepancy_Z, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.37177 -0.29593 -0.03296  0.21531  2.83252 
## 
## Coefficients:
##                     Estimate Std. Error t value    Pr(>|t|)    
## (Intercept)        -0.005542   0.043755  -0.127       0.899    
## bias_discrepancy_Z -0.232661   0.043760  -5.317 0.000000215 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7387 on 283 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.09082,    Adjusted R-squared:  0.0876 
## F-statistic: 28.27 on 1 and 283 DF,  p-value: 0.0000002146
summary(lm(shame_discrepancy ~ bias_aware_Z*as.factor(bias), wf_dt2)) # bias awareness by itself non-sig (p = .08), but interacts with bias condition (p = .01) to predict greater increases in shame
## 
## Call:
## lm(formula = shame_discrepancy ~ bias_aware_Z * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.33552 -0.38817  0.00329  0.25513  2.55957 
## 
## Coefficients:
##                               Estimate Std. Error t value      Pr(>|t|)    
## (Intercept)                   -0.29818    0.06271  -4.755 0.00000317436 ***
## bias_aware_Z                  -0.11342    0.06487  -1.748        0.0815 .  
## as.factor(bias)1               0.53949    0.08558   6.304 0.00000000112 ***
## bias_aware_Z:as.factor(bias)1  0.22142    0.08586   2.579        0.0104 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7201 on 281 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.1419, Adjusted R-squared:  0.1328 
## F-statistic:  15.5 on 3 and 281 DF,  p-value: 0.000000002351
summary(lm(shame_discrepancy ~ defensive_reverse_Z, wf_dt2)) # defensiveness does not predict changes in shame (p = .11)
## 
## Call:
## lm(formula = shame_discrepancy ~ defensive_reverse_Z, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.43388 -0.23859 -0.10711  0.00122  3.10294 
## 
## Coefficients:
##                     Estimate Std. Error t value Pr(>|t|)
## (Intercept)         -0.00155    0.04668  -0.033    0.974
## defensive_reverse_Z  0.07457    0.04675   1.595    0.112
## 
## Residual standard error: 0.7768 on 275 degrees of freedom
##   (41 observations deleted due to missingness)
## Multiple R-squared:  0.009166,   Adjusted R-squared:  0.005563 
## F-statistic: 2.544 on 1 and 275 DF,  p-value: 0.1119
summary(lm(shame_discrepancy ~ SE_change, wf_dt2)) # self-esteem change negatively predicts changes in shame (p = .017); the original comment about defensiveness was a copy-paste from the previous model
## 
## Call:
## lm(formula = shame_discrepancy ~ SE_change, data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.4376 -0.2412 -0.1151  0.1553  3.0150 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)  
## (Intercept) -0.003262   0.045435  -0.072   0.9428  
## SE_change   -0.357664   0.148738  -2.405   0.0168 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7669 on 283 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.02002,    Adjusted R-squared:  0.01656 
## F-statistic: 5.782 on 1 and 283 DF,  p-value: 0.01683

6. Defensiveness Regressions

summary(lm(defensive_reverse_Z ~ bias_aware_Z, wf_dt2)) # bias awareness predicts defensiveness on its own (p = .014) but not in an interaction with bias condition
## 
## Call:
## lm(formula = defensive_reverse_Z ~ bias_aware_Z, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.34709 -0.70563 -0.05711  0.74082  1.97285 
## 
## Coefficients:
##                Estimate Std. Error t value Pr(>|t|)  
## (Intercept)  -0.0001677  0.0594346  -0.003   0.9978  
## bias_aware_Z -0.1451347  0.0589087  -2.464   0.0144 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.991 on 276 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.02152,    Adjusted R-squared:  0.01797 
## F-statistic:  6.07 on 1 and 276 DF,  p-value: 0.01436
summary(lm(defensive_reverse_Z ~ bias_discrepancy_Z, wf_dt2)) # larger bias discrepancy is associated with more defensiveness (p < .001; remains sig with bias awareness in the model)
## 
## Call:
## lm(formula = defensive_reverse_Z ~ bias_discrepancy_Z, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.49872 -0.72067 -0.07504  0.72938  2.18636 
## 
## Coefficients:
##                     Estimate Std. Error t value    Pr(>|t|)    
## (Intercept)        -0.005674   0.057373  -0.099       0.921    
## bias_discrepancy_Z -0.304138   0.058734  -5.178 0.000000432 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9564 on 276 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.08855,    Adjusted R-squared:  0.08525 
## F-statistic: 26.81 on 1 and 276 DF,  p-value: 0.0000004323
summary(lm(SE_2_Z ~ defensive_reverse_Z*as.factor(bias), wf_dt2)) # SE_2 and SE change are related to defensiveness until bias condition is included in the model as an interaction term. NOTE(review): in the output below no term is sig (defensiveness main effect p = .20) — confirm against the simpler model
## 
## Call:
## lm(formula = SE_2_Z ~ defensive_reverse_Z * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.65241 -0.83224  0.08357  0.77345  1.84127 
## 
## Coefficients:
##                                       Estimate Std. Error t value Pr(>|t|)
## (Intercept)                          -0.003339   0.093068  -0.036    0.971
## defensive_reverse_Z                  -0.122163   0.095120  -1.284    0.200
## as.factor(bias)1                     -0.033667   0.125646  -0.268    0.789
## defensive_reverse_Z:as.factor(bias)1 -0.012228   0.126361  -0.097    0.923
## 
## Residual standard error: 0.9969 on 274 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.01818,    Adjusted R-squared:  0.007434 
## F-statistic: 1.692 on 3 and 274 DF,  p-value: 0.1691
summary(lm(defensive_reverse_Z ~ as.factor(brand_race)*as.factor(bias), wf_dt2)) # brand race and bias condition obviously don't interact (p = .15); bias condition main effect is sig (p = .01)
## 
## Call:
## lm(formula = defensive_reverse_Z ~ as.factor(brand_race) * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.47040 -0.57936 -0.04932  0.64355  2.15452 
## 
## Coefficients:
##                                         Estimate Std. Error t value Pr(>|t|)  
## (Intercept)                              -0.2525     0.1216  -2.076   0.0388 *
## as.factor(brand_race)1                   -0.1213     0.1694  -0.716   0.4744  
## as.factor(bias)1                          0.4212     0.1630   2.585   0.0103 *
## as.factor(brand_race)1:as.factor(bias)1   0.3300     0.2306   1.431   0.1537  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9578 on 274 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.0926, Adjusted R-squared:  0.08266 
## F-statistic:  9.32 on 3 and 274 DF,  p-value: 0.000006898
summary(lm(defensive_reverse_Z ~ as.factor(iat_prev), wf_dt2)) # previous IAT experience does not predict defensiveness (p = .75)
## 
## Call:
## lm(formula = defensive_reverse_Z ~ as.factor(iat_prev), data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.1071 -0.7751 -0.1295  0.7982  1.8074 
## 
## Coefficients:
##                       Estimate Std. Error t value Pr(>|t|)
## (Intercept)            0.01409    0.07425   0.190    0.850
## as.factor(iat_prev)22 -0.04081    0.12634  -0.323    0.747
## 
## Residual standard error: 1.002 on 276 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.0003779,  Adjusted R-squared:  -0.003244 
## F-statistic: 0.1044 on 1 and 276 DF,  p-value: 0.7469
summary(lm(defensive_reverse_Z ~ as.factor(gender), wf_dt2)) # no gender differences in defensiveness (all contrasts ns)
## 
## Call:
## lm(formula = defensive_reverse_Z ~ as.factor(gender), data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.17782 -0.70443 -0.05881  0.72748  1.87806 
## 
## Coefficients:
##                    Estimate Std. Error t value Pr(>|t|)
## (Intercept)        -0.09737    0.08829  -1.103    0.271
## as.factor(gender)2  0.18215    0.12239   1.488    0.138
## as.factor(gender)3  0.21789    0.38918   0.560    0.576
## as.factor(gender)4  0.26400    1.00670   0.262    0.793
## as.factor(gender)5 -0.22021    1.00670  -0.219    0.827
## 
## Residual standard error: 1.003 on 273 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.008879,   Adjusted R-squared:  -0.005643 
## F-statistic: 0.6114 on 4 and 273 DF,  p-value: 0.6547
summary(lm(defensive_reverse_Z ~ education + bias_aware_Z, wf_dt2)) # education is negatively related to defensiveness (p = .008), and the effect holds even with bias awareness in the model
## 
## Call:
## lm(formula = defensive_reverse_Z ~ education + bias_aware_Z, 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.21614 -0.66533 -0.07358  0.65799  1.99089 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)   
## (Intercept)   0.50493    0.19941   2.532  0.01189 * 
## education    -0.11909    0.04493  -2.651  0.00849 **
## bias_aware_Z -0.12196    0.05893  -2.070  0.03942 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9803 on 275 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.0459, Adjusted R-squared:  0.03896 
## F-statistic: 6.615 on 2 and 275 DF,  p-value: 0.001564
summary(lm(defensive_reverse_Z ~ age, wf_dt2)) # age does not predict defensiveness (p = .60)
## 
## Call:
## lm(formula = defensive_reverse_Z ~ age, data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.1370 -0.7582 -0.1160  0.7711  1.8209 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.101593   0.197490  -0.514    0.607
## age          0.002274   0.004284   0.531    0.596
## 
## Residual standard error: 1.005 on 274 degrees of freedom
##   (42 observations deleted due to missingness)
## Multiple R-squared:  0.001027,   Adjusted R-squared:  -0.002619 
## F-statistic: 0.2818 on 1 and 274 DF,  p-value: 0.596

7. Purchase Intentions

summary(lm(purchase_Z ~ as.factor(bias)*as.factor(brand_race), wf_dt2)) # main interaction working (p = .034); it is defensiveness driving this effect — when defensiveness is included as a term in this model the effect vanishes, but defensiveness on its own is still negatively correlated with the dv; the interaction also loses its sig when shame and SE are included, but they are not significant predictors of the dv
## 
## Call:
## lm(formula = purchase_Z ~ as.factor(bias) * as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.0077 -0.8102  0.1360  0.8310  2.0630 
## 
## Coefficients:
##                                         Estimate Std. Error t value   Pr(>|t|)
## (Intercept)                              -0.4785     0.1031  -4.640 0.00000513
## as.factor(bias)1                          0.5076     0.1486   3.417   0.000717
## as.factor(brand_race)1                    0.7076     0.1516   4.666 0.00000456
## as.factor(bias)1:as.factor(brand_race)1  -0.4563     0.2148  -2.124   0.034459
##                                            
## (Intercept)                             ***
## as.factor(bias)1                        ***
## as.factor(brand_race)1                  ***
## as.factor(bias)1:as.factor(brand_race)1 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9564 on 314 degrees of freedom
## Multiple R-squared:  0.09392,    Adjusted R-squared:  0.08527 
## F-statistic: 10.85 on 3 and 314 DF,  p-value: 0.000000842
summary(lm(purchase ~ shame_discrepancy*as.factor(bias), wf_dt2)) # shame does not predict purchase and neither does shame change, and it doesn't interact with anything to predict it
## 
## Call:
## lm(formula = purchase ~ shame_discrepancy * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.0120 -1.6944  0.0102  1.0594  3.0237 
## 
## Coefficients:
##                                    Estimate Std. Error t value
## (Intercept)                         3.98847    0.17856  22.337
## shame_discrepancy                  -0.01342    0.28019  -0.048
## as.factor(bias)1                    0.04971    0.23526   0.211
## shame_discrepancy:as.factor(bias)1  0.28505    0.33075   0.862
##                                               Pr(>|t|)    
## (Intercept)                        <0.0000000000000002 ***
## shame_discrepancy                                0.962    
## as.factor(bias)1                                 0.833    
## shame_discrepancy:as.factor(bias)1               0.390    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.819 on 281 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.009381,   Adjusted R-squared:  -0.001195 
## F-statistic: 0.887 on 3 and 281 DF,  p-value: 0.4482
summary(lm(purchase ~ defensive_reverse_Z*as.factor(brand_race), wf_dt2)) # defensiveness does predict purchase (p = .002); interaction with brand race ns (p = .10)
## 
## Call:
## lm(formula = purchase ~ defensive_reverse_Z * as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.5823 -1.2294  0.2343  1.4127  3.6940 
## 
## Coefficients:
##                                            Estimate Std. Error t value
## (Intercept)                                  3.6915     0.1490  24.776
## defensive_reverse_Z                         -0.4747     0.1504  -3.156
## as.factor(brand_race)1                       0.7093     0.2115   3.354
## defensive_reverse_Z:as.factor(brand_race)1   0.3493     0.2119   1.649
##                                                        Pr(>|t|)    
## (Intercept)                                < 0.0000000000000002 ***
## defensive_reverse_Z                                    0.001776 ** 
## as.factor(brand_race)1                                 0.000909 ***
## defensive_reverse_Z:as.factor(brand_race)1             0.100347    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.763 on 274 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.07302,    Adjusted R-squared:  0.06287 
## F-statistic: 7.194 on 3 and 274 DF,  p-value: 0.0001152
summary(lm(purchase ~ SE_change, wf_dt2)) # self-esteem change does not predict purchase (p = .69)
## 
## Call:
## lm(formula = purchase ~ SE_change, data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.0669 -1.7604 -0.0625  0.9375  3.0428 
## 
## Coefficients:
##             Estimate Std. Error t value            Pr(>|t|)    
## (Intercept)   4.0602     0.1077  37.707 <0.0000000000000002 ***
## SE_change    -0.1422     0.3531  -0.403               0.687    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.821 on 284 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.0005709,  Adjusted R-squared:  -0.002948 
## F-statistic: 0.1622 on 1 and 284 DF,  p-value: 0.6874
summary(lm(purchase ~ bias_discrepancy, wf_dt2)) # bias discrepancy does not predict purchase (p = .42)
## 
## Call:
## lm(formula = purchase ~ bias_discrepancy, data = wf_dt2)
## 
## Residuals:
##    Min     1Q Median     3Q    Max 
## -4.130 -1.684 -0.038  1.123  3.238 
## 
## Coefficients:
##                  Estimate Std. Error t value            Pr(>|t|)    
## (Intercept)       4.03803    0.11084  36.430 <0.0000000000000002 ***
## bias_discrepancy -0.04604    0.05749  -0.801               0.424    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.819 on 284 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.002253,   Adjusted R-squared:  -0.00126 
## F-statistic: 0.6413 on 1 and 284 DF,  p-value: 0.4239
summary(lm(purchase ~ bias_aware_Z*as.factor(brand_race)*as.factor(bias), wf_dt2)) # bias awareness does not predict purchase, alone or in any 2- or 3-way interaction
## 
## Call:
## lm(formula = purchase ~ bias_aware_Z * as.factor(brand_race) * 
##     as.factor(bias), data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.3864 -1.6991  0.2832  1.6628  3.7582 
## 
## Coefficients:
##                                                       Estimate Std. Error
## (Intercept)                                           3.243069   0.236294
## bias_aware_Z                                         -0.002706   0.232092
## as.factor(brand_race)1                                0.863088   0.330695
## as.factor(bias)1                                      0.470247   0.323645
## bias_aware_Z:as.factor(brand_race)1                  -0.230354   0.346281
## bias_aware_Z:as.factor(bias)1                        -0.006271   0.309271
## as.factor(brand_race)1:as.factor(bias)1              -0.337603   0.456278
## bias_aware_Z:as.factor(brand_race)1:as.factor(bias)1  0.134647   0.462314
##                                                      t value
## (Intercept)                                           13.725
## bias_aware_Z                                          -0.012
## as.factor(brand_race)1                                 2.610
## as.factor(bias)1                                       1.453
## bias_aware_Z:as.factor(brand_race)1                   -0.665
## bias_aware_Z:as.factor(bias)1                         -0.020
## as.factor(brand_race)1:as.factor(bias)1               -0.740
## bias_aware_Z:as.factor(brand_race)1:as.factor(bias)1   0.291
##                                                                  Pr(>|t|)    
## (Intercept)                                          < 0.0000000000000002 ***
## bias_aware_Z                                                      0.99070    
## as.factor(brand_race)1                                            0.00952 ** 
## as.factor(bias)1                                                  0.14730    
## bias_aware_Z:as.factor(brand_race)1                               0.50643    
## bias_aware_Z:as.factor(bias)1                                     0.98384    
## as.factor(brand_race)1:as.factor(bias)1                           0.45995    
## bias_aware_Z:as.factor(brand_race)1:as.factor(bias)1              0.77107    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.971 on 294 degrees of freedom
##   (16 observations deleted due to missingness)
## Multiple R-squared:  0.04148,    Adjusted R-squared:  0.01866 
## F-statistic: 1.817 on 7 and 294 DF,  p-value: 0.08359
summary(lm(purchase ~ BMIS_calm_2*as.factor(brand_race)*as.factor(bias), wf_dt2)) # calm (BMIS) does not predict purchase — main effect only marginal (p = .052), no sig interactions; the original "BA does not" comment referred to the wrong predictor
## 
## Call:
## lm(formula = purchase ~ BMIS_calm_2 * as.factor(brand_race) * 
##     as.factor(bias), data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.4348 -1.3228  0.1744  1.3861  3.5233 
## 
## Coefficients:
##                                                     Estimate Std. Error t value
## (Intercept)                                           1.7381     0.9518   1.826
## BMIS_calm_2                                           0.5847     0.2991   1.955
## as.factor(brand_race)1                                2.1967     1.2436   1.766
## as.factor(bias)1                                      1.5642     1.1364   1.376
## BMIS_calm_2:as.factor(brand_race)1                   -0.4180     0.3944  -1.060
## BMIS_calm_2:as.factor(bias)1                         -0.4102     0.3613  -1.135
## as.factor(brand_race)1:as.factor(bias)1              -1.8100     1.5519  -1.166
## BMIS_calm_2:as.factor(brand_race)1:as.factor(bias)1   0.4939     0.4985   0.991
##                                                     Pr(>|t|)  
## (Intercept)                                           0.0689 .
## BMIS_calm_2                                           0.0516 .
## as.factor(brand_race)1                                0.0784 .
## as.factor(bias)1                                      0.1698  
## BMIS_calm_2:as.factor(brand_race)1                    0.2901  
## BMIS_calm_2:as.factor(bias)1                          0.2572  
## as.factor(brand_race)1:as.factor(bias)1               0.2445  
## BMIS_calm_2:as.factor(brand_race)1:as.factor(bias)1   0.3226  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.781 on 278 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.06422,    Adjusted R-squared:  0.04066 
## F-statistic: 2.726 on 7 and 278 DF,  p-value: 0.009473

8. Shop Intentions

summary(lm(shop_intentions_Z ~ as.factor(bias)*as.factor(brand_race), wf_dt2)) # main interaction working (p = .028); it is defensiveness driving this effect — when defensiveness is included as a term in this model the effect vanishes, but defensiveness on its own is still negatively correlated with the dv; the interaction also loses its sig when shame and SE are included, but they are not significant predictors of the dv
## 
## Call:
## lm(formula = shop_intentions_Z ~ as.factor(bias) * as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.03843 -0.86980  0.06421  0.76362  1.92776 
## 
## Coefficients:
##                                         Estimate Std. Error t value   Pr(>|t|)
## (Intercept)                              -0.4460     0.1038  -4.297 0.00002310
## as.factor(bias)1                          0.4625     0.1495   3.094    0.00215
## as.factor(brand_race)1                    0.6971     0.1526   4.568 0.00000707
## as.factor(bias)1:as.factor(brand_race)1  -0.4773     0.2162  -2.208    0.02800
##                                            
## (Intercept)                             ***
## as.factor(bias)1                        ** 
## as.factor(brand_race)1                  ***
## as.factor(bias)1:as.factor(brand_race)1 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9624 on 314 degrees of freedom
## Multiple R-squared:  0.08251,    Adjusted R-squared:  0.07374 
## F-statistic: 9.412 on 3 and 314 DF,  p-value: 0.000005659
summary(lm(shop_intentions ~ shame_discrepancy , wf_dt2)) # increases in shame from t1 to t2 are marginally associated with increases in shopping intentions (p = .054); this effect holds across all of the conditions
## 
## Call:
## lm(formula = shop_intentions ~ shame_discrepancy, data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.2249 -1.2249 -0.1547  1.2210  3.3995 
## 
## Coefficients:
##                   Estimate Std. Error t value            Pr(>|t|)    
## (Intercept)         4.2505     0.1066  39.858 <0.0000000000000002 ***
## shame_discrepancy   0.2670     0.1381   1.932              0.0543 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.8 on 283 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.01302,    Adjusted R-squared:  0.009537 
## F-statistic: 3.734 on 1 and 283 DF,  p-value: 0.0543
summary(lm(shop_intentions_Z ~ defensive_reverse_Z*as.factor(brand_race), wf_dt2)) # defensiveness does predict shop intentions (p = .004); no interaction with brand race (p = .32)
## 
## Call:
## lm(formula = shop_intentions_Z ~ defensive_reverse_Z * as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.8980 -0.3948  0.1070  0.6027  1.6065 
## 
## Coefficients:
##                                            Estimate Std. Error t value Pr(>|t|)
## (Intercept)                                 0.04045    0.06938   0.583  0.56035
## defensive_reverse_Z                        -0.20333    0.07003  -2.903  0.00399
## as.factor(brand_race)1                      0.31634    0.09848   3.212  0.00147
## defensive_reverse_Z:as.factor(brand_race)1  0.09775    0.09865   0.991  0.32265
##                                              
## (Intercept)                                  
## defensive_reverse_Z                        **
## as.factor(brand_race)1                     **
## defensive_reverse_Z:as.factor(brand_race)1   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8208 on 274 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.07025,    Adjusted R-squared:  0.06007 
## F-statistic: 6.901 on 3 and 274 DF,  p-value: 0.0001701
summary(lm(shop_intentions_Z ~ SE_change, wf_dt2)) # self-esteem change does not predict shop intentions (p = .78)
## 
## Call:
## lm(formula = shop_intentions_Z ~ SE_change, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.98969 -0.58777 -0.08654  0.80260  1.31627 
## 
## Coefficients:
##             Estimate Std. Error t value  Pr(>|t|)    
## (Intercept)  0.20023    0.05004   4.002 0.0000803 ***
## SE_change   -0.04692    0.16408  -0.286     0.775    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.846 on 284 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.0002878,  Adjusted R-squared:  -0.003232 
## F-statistic: 0.08176 on 1 and 284 DF,  p-value: 0.7751
summary(lm(shop_intentions_Z ~ bias_discrepancy*as.factor(bias), wf_dt2)) # bias discrepancy does not predict shop intentions (marginal main effect p = .09; interaction ns)
## 
## Call:
## lm(formula = shop_intentions_Z ~ bias_discrepancy * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.96753 -0.54975 -0.03918  0.69974  1.67522 
## 
## Coefficients:
##                                   Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                        0.27664    0.08194   3.376 0.000838 ***
## bias_discrepancy                  -0.07834    0.04611  -1.699 0.090404 .  
## as.factor(bias)1                  -0.06287    0.13353  -0.471 0.638136    
## bias_discrepancy:as.factor(bias)1  0.09510    0.06908   1.377 0.169694    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8446 on 282 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.01075,    Adjusted R-squared:  0.000223 
## F-statistic: 1.021 on 3 and 282 DF,  p-value: 0.3836
summary(lm(shop_intentions_Z ~ bias_aware_Z, wf_dt2)) # bias awareness does not predict shop intentions (p = .94)
## 
## Call:
## lm(formula = shop_intentions_Z ~ bias_aware_Z, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.88858 -0.94082 -0.00988  0.45875  1.39600 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)  
## (Intercept)   0.094689   0.053897   1.757    0.080 .
## bias_aware_Z -0.003788   0.053986  -0.070    0.944  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9366 on 300 degrees of freedom
##   (16 observations deleted due to missingness)
## Multiple R-squared:  1.641e-05,  Adjusted R-squared:  -0.003317 
## F-statistic: 0.004924 on 1 and 300 DF,  p-value: 0.9441
summary(lm(SE_2 ~ shop_intentions_Z + as.factor(brand_race), wf_dt2)) # shop intentions positively predict T2 self-esteem (p = .002); brand race ns. The original "BMIS_calm" comment did not match this model
## 
## Call:
## lm(formula = SE_2 ~ shop_intentions_Z + as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.34586 -0.99447  0.06743  1.00553  2.48072 
## 
## Coefficients:
##                        Estimate Std. Error t value             Pr(>|t|)    
## (Intercept)             2.83289    0.10930  25.919 < 0.0000000000000002 ***
## shop_intentions_Z       0.29499    0.09292   3.175              0.00167 ** 
## as.factor(brand_race)1  0.07586    0.15670   0.484              0.62869    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.302 on 283 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.03834,    Adjusted R-squared:  0.03154 
## F-statistic: 5.641 on 2 and 283 DF,  p-value: 0.003961

9. Word of Mouth

summary(lm(wom_Z ~ as.factor(bias)*as.factor(brand_race), wf_dt2)) # main interaction only marginal here (p = .06); it is defensiveness driving this effect — when defensiveness is included as a term in this model the effect vanishes, but defensiveness on its own is still negatively correlated with the dv; the interaction also loses its sig when shame and SE are included, but they are not significant predictors of the dv
## 
## Call:
## lm(formula = wom_Z ~ as.factor(bias) * as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.0582 -0.7747  0.1550  0.7199  2.0588 
## 
## Coefficients:
##                                         Estimate Std. Error t value    Pr(>|t|)
## (Intercept)                              -0.4693     0.1024  -4.584 0.000006596
## as.factor(bias)1                          0.3870     0.1475   2.624     0.00911
## as.factor(brand_race)1                    0.7853     0.1505   5.217 0.000000331
## as.factor(bias)1:as.factor(brand_race)1  -0.4010     0.2133  -1.880     0.06099
##                                            
## (Intercept)                             ***
## as.factor(bias)1                        ** 
## as.factor(brand_race)1                  ***
## as.factor(bias)1:as.factor(brand_race)1 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9494 on 314 degrees of freedom
## Multiple R-squared:  0.1072, Adjusted R-squared:  0.09868 
## F-statistic: 12.57 on 3 and 314 DF,  p-value: 0.00000008821
summary(lm(wom_Z ~ shame_discrepancy, wf_dt2)) # shame change does not predict word of mouth (p = .11), and it doesn't interact with anything to predict it
## 
## Call:
## lm(formula = wom_Z ~ shame_discrepancy, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.66782 -0.49641 -0.02045  0.46941  1.65472 
## 
## Coefficients:
##                   Estimate Std. Error t value Pr(>|t|)    
## (Intercept)        0.19227    0.05057   3.802 0.000176 ***
## shame_discrepancy  0.10573    0.06551   1.614 0.107629    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8537 on 283 degrees of freedom
##   (33 observations deleted due to missingness)
## Multiple R-squared:  0.009122,   Adjusted R-squared:  0.00562 
## F-statistic: 2.605 on 1 and 283 DF,  p-value: 0.1076
summary(lm(wom_Z ~ defensive_reverse_Z*as.factor(brand_race), wf_dt2)) # defensiveness does predict word of mouth (p = .024); no interaction with brand race
## 
## Call:
## lm(formula = wom_Z ~ defensive_reverse_Z * as.factor(brand_race), 
##     data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.92602 -0.42021  0.08609  0.53949  1.75657 
## 
## Coefficients:
##                                            Estimate Std. Error t value
## (Intercept)                                -0.03836    0.06931  -0.553
## defensive_reverse_Z                        -0.15842    0.06996  -2.264
## as.factor(brand_race)1                      0.46415    0.09837   4.718
## defensive_reverse_Z:as.factor(brand_race)1  0.04662    0.09855   0.473
##                                             Pr(>|t|)    
## (Intercept)                                   0.5804    
## defensive_reverse_Z                           0.0243 *  
## as.factor(brand_race)1                     0.0000038 ***
## defensive_reverse_Z:as.factor(brand_race)1    0.6365    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.82 on 274 degrees of freedom
##   (40 observations deleted due to missingness)
## Multiple R-squared:  0.09728,    Adjusted R-squared:  0.0874 
## F-statistic: 9.843 on 3 and 274 DF,  p-value: 0.000003472
summary(lm(wom_Z ~ SE_change, wf_dt2)) # self-esteem change does not predict word of mouth (p = .91)
## 
## Call:
## lm(formula = wom_Z ~ SE_change, data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.4919 -0.5102 -0.0340  0.4420  1.4087 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  0.19504    0.05073   3.844 0.000149 ***
## SE_change   -0.01925    0.16637  -0.116 0.907976    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8578 on 284 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  4.713e-05,  Adjusted R-squared:  -0.003474 
## F-statistic: 0.01339 on 1 and 284 DF,  p-value: 0.908
# Does bias discrepancy predict word of mouth? Output below: no
# (p = .5785).
summary(lm(wom_Z ~ bias_discrepancy, wf_dt2)) # BD does not
## 
## Call:
## lm(formula = wom_Z ~ bias_discrepancy, data = wf_dt2)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.52951 -0.50224 -0.02628  0.44968  1.49203 
## 
## Coefficients:
##                  Estimate Std. Error t value Pr(>|t|)    
## (Intercept)       0.18792    0.05224   3.597 0.000379 ***
## bias_discrepancy -0.01507    0.02710  -0.556 0.578549    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8574 on 284 degrees of freedom
##   (32 observations deleted due to missingness)
## Multiple R-squared:  0.001088,   Adjusted R-squared:  -0.002429 
## F-statistic: 0.3093 on 1 and 284 DF,  p-value: 0.5785
# Does bias awareness predict word of mouth, in a 3-way model with brand race
# and bias condition? Output below: no bias-awareness term is significant;
# only the brand-race main effect is (p = .00166).
summary(lm(wom_Z ~ bias_aware_Z*as.factor(brand_race)*as.factor(bias), wf_dt2)) # BA does not
## 
## Call:
## lm(formula = wom_Z ~ bias_aware_Z * as.factor(brand_race) * as.factor(bias), 
##     data = wf_dt2)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.1145 -0.6867  0.2219  0.7105  1.7909 
## 
## Coefficients:
##                                                      Estimate Std. Error
## (Intercept)                                          -0.18449    0.11062
## bias_aware_Z                                          0.07858    0.10866
## as.factor(brand_race)1                                0.49146    0.15482
## as.factor(bias)1                                      0.09996    0.15152
## bias_aware_Z:as.factor(brand_race)1                  -0.15133    0.16211
## bias_aware_Z:as.factor(bias)1                        -0.05385    0.14479
## as.factor(brand_race)1:as.factor(bias)1              -0.10521    0.21361
## bias_aware_Z:as.factor(brand_race)1:as.factor(bias)1  0.12039    0.21643
##                                                      t value Pr(>|t|)   
## (Intercept)                                           -1.668  0.09643 . 
## bias_aware_Z                                           0.723  0.47015   
## as.factor(brand_race)1                                 3.174  0.00166 **
## as.factor(bias)1                                       0.660  0.50995   
## bias_aware_Z:as.factor(brand_race)1                   -0.934  0.35133   
## bias_aware_Z:as.factor(bias)1                         -0.372  0.71023   
## as.factor(brand_race)1:as.factor(bias)1               -0.493  0.62270   
## bias_aware_Z:as.factor(brand_race)1:as.factor(bias)1   0.556  0.57847   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9228 on 294 degrees of freedom
##   (16 observations deleted due to missingness)
## Multiple R-squared:  0.05836,    Adjusted R-squared:  0.03594 
## F-statistic: 2.603 on 7 and 294 DF,  p-value: 0.01278

# *** Correlation Matrices *** ----

# List every column in the widened dataset so the variables used in the
# correlation matrix below can be located by name.
colnames(wf_dt2)
##   [1] "ParticipantID"              "StartDate"                 
##   [3] "EndDate"                    "Status"                    
##   [5] "Progress"                   "Duration (in seconds)"     
##   [7] "Finished"                   "RecordedDate"              
##   [9] "ResponseId"                 "DistributionChannel"       
##  [11] "UserLanguage"               "consent"                   
##  [13] "prolificID"                 "BA_1"                      
##  [15] "BA_2"                       "BA_3"                      
##  [17] "BA_4"                       "SE1"                       
##  [19] "BMIS_sad_1"                 "BMIS_shame_1"              
##  [21] "BMIS_guilt_1"               "BMIS_tired_1"              
##  [23] "BMIS_nervous_1"             "BMIS_calm_1"               
##  [25] "BMIS_fedup_1"               "BMIS_loving_1"             
##  [27] "BMIS_angry_1"               "BMIS_lively_1"             
##  [29] "BMIS_caring_1"              "BMIS_content_1"            
##  [31] "BMIS_gloomy_1"              "BMIS_jittery_1"            
##  [33] "BMIS_drowsy_1"              "BMIS_happy_1"              
##  [35] "Q1 RP1"                     "Q2 RP2"                    
##  [37] "Q3 RP3"                     "Q4 RP4"                    
##  [39] "Q5 RP5"                     "Q6 RP6"                    
##  [41] "Q7 RP7"                     "Q8 RN1"                    
##  [43] "Q9 RN2"                     "Q10 RN3"                   
##  [45] "Q11 RN4"                    "Q12 RN5"                   
##  [47] "Q13 RN6"                    "Q14 RN7"                   
##  [49] "Q15 LP1"                    "Q16 LP2"                   
##  [51] "Q17 LP3"                    "Q18 LP4"                   
##  [53] "Q19 LP5"                    "Q20 LP6"                   
##  [55] "Q21 LP7"                    "Q22 LN1"                   
##  [57] "Q23 LN2"                    "Q24 LN3"                   
##  [59] "Q25 LN4"                    "Q26 LN5"                   
##  [61] "Q27 LN6"                    "Q28 LN7"                   
##  [63] "spwtime_First Click"        "spwtime_Last Click"        
##  [65] "spwtime_Page Submit"        "spwtime_Click Count"       
##  [67] "attentioncheck_nb"          "discrepancy_nb"            
##  [69] "BMIS_sad_2"                 "BMIS_shame_2"              
##  [71] "BMIS_guilt_2"               "BMIS_tired_2"              
##  [73] "BMIS_nervous_2"             "BMIS_calm_2"               
##  [75] "BMIS_fedup_2"               "BMIS_loving_2"             
##  [77] "BMIS_angry_2"               "BMIS_lively_2"             
##  [79] "BMIS_caring_2"              "BMIS_content_2"            
##  [81] "BMIS_gloomy_2"              "BMIS_jittery_2"            
##  [83] "BMIS_drowsy_2"              "BMIS_happy_2"              
##  [85] "credibility"                "objective"                 
##  [87] "valid"                      "useful"                    
##  [89] "rl_product_choice"          "rl_shop_intentions"        
##  [91] "rl_purchase"                "rl_wom"                    
##  [93] "rr_product_choice"          "rr_shop_intentions"        
##  [95] "rr_purchase"                "rr_wom"                    
##  [97] "attentioncheck_b"           "discrepancy_b"             
##  [99] "SE_2"                       "age"                       
## [101] "race"                       "education"                 
## [103] "polit_affil"                "polit_affil_4_TEXT"        
## [105] "polit_affil_cont_1"         "gender"                    
## [107] "gender_4_TEXT"              "iat_prev"                  
## [109] "iat_racial"                 "iat_racial_time"           
## [111] "iat_racial_quant"           "recent_results"            
## [113] "nobias_white"               "nobias_black"              
## [115] "bias_white"                 "bias_black"                
## [117] "discrepancy_bias"           "discrepancy_nobias"        
## [119] "bias_discrepancy"           "bias_discrepancy_centered" 
## [121] "bias_discrepancy_Z"         "condition"                 
## [123] "bias"                       "brand_race"                
## [125] "shop_intentions"            "shop_intentions_centered"  
## [127] "shop_intentions_Z"          "purchase"                  
## [129] "purchase_centered"          "purchase_Z"                
## [131] "wom"                        "wom_centered"              
## [133] "wom_Z"                      "defensive"                 
## [135] "defensive_reverse"          "defensive_reverse_centered"
## [137] "defensive_reverse_Z"        "bias_aware"                
## [139] "bias_aware_center"          "bias_aware_Z"              
## [141] "BMIS_guilt_1_center"        "BMIS_guilt_1_Z"            
## [143] "BMIS_shame_1_center"        "BMIS_shame_1_Z"            
## [145] "BMIS_sad_1_center"          "BMIS_sad_1_Z"              
## [147] "BMIS_guilt_2_center"        "BMIS_guilt_2_Z"            
## [149] "BMIS_shame_2_center"        "BMIS_shame_2_Z"            
## [151] "BMIS_sad_2_center"          "BMIS_sad_2_Z"              
## [153] "guilt_shame_sad"            "guilt_shame_sad_center"    
## [155] "guilt_shame_sad_Z"          "shame_discrepancy"         
## [157] "SE_2_Z"                     "SE_1_Z"                    
## [159] "SE_change"
# Variables for the correlation matrix: standardized emotion/defensiveness
# scores, outcome measures, political affiliation, and raw per-condition ratings.
cor_vars <- c(
  "BMIS_shame_2_Z",
  "defensive_reverse_Z",
  "bias_aware_Z",
  "guilt_shame_sad_Z",
  "SE_2_Z",
  "shop_intentions",
  "wom",
  "purchase",
  "polit_affil_cont_1",
  "rl_shop_intentions",
  "rl_purchase",
  "rl_wom",
  "rr_shop_intentions",
  "rr_purchase",
  "rr_wom"
)

# Pairwise Pearson correlations; pairwise deletion keeps all available pairs
# despite scattered missingness across columns.
cor_matrix <- wf_dt2 %>%
  select(all_of(cor_vars)) %>%
  cor(use = "pairwise.complete.obs")

# p-value of a Pearson correlation test between two numeric vectors.
# Incomplete pairs are dropped by cor.test itself.
get_p_value <- function(x, y) {
  cor.test(x, y, method = "pearson")$p.value
}

# Calculate p-values for the correlations between each pair of variables.
# BUG FIX: the correlation test must run on the underlying observations in
# wf_dt2, not on the columns of cor_matrix (those are 15-element vectors of
# correlation coefficients, so testing them yields meaningless p-values that
# do not correspond to the r values reported alongside them).
p_values <- outer(colnames(cor_matrix), colnames(cor_matrix),
                   Vectorize(function(x, y) get_p_value(wf_dt2[[x]], wf_dt2[[y]])))

# Long-format table pairing each correlation coefficient with its p-value.
# Columns of cor_matrix/p_values are unrolled column-major, so `variables`
# holds the (slow-moving) column name and `others` the cycling row name.
cor_names <- colnames(cor_matrix)
corr_data <- data.frame(
  variables = rep(cor_names, each = length(cor_names)),
  others = rep(cor_names, times = length(cor_names)),
  corr_values = c(cor_matrix),
  p_value = c(p_values)
)

# Significance marker for a single p-value: "*" when p < .05, "" otherwise.
add_asterisk <- function(p_value) {
  if (p_value < 0.05) "*" else ""
}

# Label every correlation with its significance marker.
# vapply (vs. sapply) guarantees a character vector even when corr_data has
# zero rows, where sapply would silently return an empty list.
corr_data$asterisks <- vapply(corr_data$p_value, add_asterisk, character(1))

# Heatmap of the correlation matrix: tiles colored by r on a blue-white-red
# scale, the coefficient printed in each cell, and a red asterisk marking
# significant (p < .05) correlations.
heatmap_plot <- ggplot(data = corr_data, aes(x = variables, y = others, fill = corr_values)) +
  geom_tile() +
  # Correlation coefficient, centered in the tile
  geom_text(aes(label = sprintf("%.2f", corr_values)), color = "black", size = 3) +
  # FIX: nudge the asterisk upward (vjust) so it no longer overprints the
  # coefficient label — both layers were centered at the same tile midpoint.
  geom_text(aes(label = asterisks), color = "red", size = 5, vjust = -0.8) +
  scale_fill_gradient2(low = "blue", mid = "white", high = "red", 
                       midpoint = 0, limits = c(-1, 1)) +
  labs(title = "Correlation Heatmap",
       x = "Variables",
       y = "Other Variables") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Display the heatmap
print(heatmap_plot)

# Export the long-format correlation table to an Excel workbook.
# install.packages("writexl")  # one-time install if the package is missing
library(writexl)

# Destination path for the workbook (relative to the working directory)
excel_file_path <- "correlation_matrix.xlsx"

# Write the correlations, p-values, and asterisk labels out for reporting
write_xlsx(corr_data, path = excel_file_path)

# Echo the path so the rendered output confirms where the file was saved
excel_file_path
## [1] "correlation_matrix.xlsx"